/* aarch64-opc.c -- AArch64 opcode support.
   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#include "aarch64-opc.h"

#ifdef DEBUG_AARCH64
int debug_dump = FALSE;
#endif /* DEBUG_AARCH64 */
/* Helper functions to determine which operand to use to encode/decode
   the size:Q fields for AdvSIMD instructions.  */

static inline bfd_boolean
vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
  return ((qualifier >= AARCH64_OPND_QLF_V_8B
	   && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE : FALSE);
}

static inline bfd_boolean
fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
{
  return ((qualifier >= AARCH64_OPND_QLF_S_B
	   && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE : FALSE);
}
enum data_pattern
{
  DP_UNKNOWN,
  DP_VECTOR_3SAME,
  DP_VECTOR_LONG,
  DP_VECTOR_WIDE,
  DP_VECTOR_ACROSS_LANES,
};

static const char significant_operand_index [] =
{
  0,	/* DP_UNKNOWN, by default using operand 0.  */
  0,	/* DP_VECTOR_3SAME */
  1,	/* DP_VECTOR_LONG */
  2,	/* DP_VECTOR_WIDE */
  1,	/* DP_VECTOR_ACROSS_LANES */
};
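
/* Illustrative example of the table above: for a widening (long) form such
   as "v.8h, v.8b, v.8b" the qualifier sequence matches DP_VECTOR_LONG, so
   operand 1 (the first source operand) is selected and its qualifier drives
   the size:Q field encoding.  */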
/* Given a sequence of qualifiers in QUALIFIERS, determine and return
   the corresponding data pattern.

   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
   corresponds to one of a sequence of operands.  */

static enum data_pattern
get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
{
  if (vector_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. v.4s, v.4s, v.4s
	 or   v.4h, v.4h, v.h[3].  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]))
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2])))
	return DP_VECTOR_3SAME;
      /* e.g. v.8h, v.8b, v.8b
	 or   v.4s, v.4h, v.h[2].  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
	return DP_VECTOR_LONG;
      /* e.g. v.8h, v.8h, v.8b.  */
      if (qualifiers[0] == qualifiers[1]
	  && vector_qualifier_p (qualifiers[2]) == TRUE
	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
	  && (aarch64_get_qualifier_esize (qualifiers[0])
	      == aarch64_get_qualifier_esize (qualifiers[1])))
	return DP_VECTOR_WIDE;
    }
  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
    {
      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
      if (vector_qualifier_p (qualifiers[1]) == TRUE
	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
	return DP_VECTOR_ACROSS_LANES;
    }

  return DP_UNKNOWN;
}
/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time when we need to select an operand.  We can
   either buffer the calculated result or statically generate the data;
   however, it is not obvious that the optimization will bring significant
   benefit.  */

int
aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
{
  return
    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
}
const aarch64_field fields[] =
{
    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
    {  5, 19 },	/* imm19: e.g. in CBZ.  */
    {  5, 19 },	/* immhi: e.g. in ADRP.  */
    { 29,  2 },	/* immlo: e.g. in ADRP.  */
    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
    {  0,  5 },	/* Rt: in load/store instructions.  */
    {  0,  5 },	/* Rd: in many integer instructions.  */
    {  5,  5 },	/* Rn: in many integer instructions.  */
    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
    { 10,  5 },	/* Ra: in fp instructions.  */
    {  5,  3 },	/* op2: in the system instructions.  */
    {  8,  4 },	/* CRm: in the system instructions.  */
    { 12,  4 },	/* CRn: in the system instructions.  */
    { 16,  3 },	/* op1: in the system instructions.  */
    { 19,  2 },	/* op0: in the system instructions.  */
    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
    { 12,  4 },	/* cond: condition flags as a source operand.  */
    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
    { 12,  1 },	/* S: in load/store reg offset instructions.  */
    { 21,  2 },	/* hw: in move wide constant instructions.  */
    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
    { 22,  2 },	/* type: floating point type field in fp data inst.  */
    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
    {  5, 16 },	/* imm16: in exception instructions.  */
    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
    { 22,  1 },	/* N: in logical (immediate) instructions.  */
    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
    { 31,  1 },	/* sf: in integer data processing instructions.  */
    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
};
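
/* Illustrative note: each entry above is an (lsb, width) pair.  For example,
   the "Q" entry { 30, 1 } describes the single Q bit at bit position 30 of
   an AdvSIMD instruction word, and the "imm12" entry { 10, 12 } describes
   the 12-bit unsigned offset field occupying bits 21..10.  */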
enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd type)
{
  return aarch64_operands[type].op_class;
}

const char *
aarch64_get_operand_name (enum aarch64_opnd type)
{
  return aarch64_operands[type].name;
}

/* Get operand description string.
   This is usually for diagnostic purposes.  */
const char *
aarch64_get_operand_desc (enum aarch64_opnd type)
{
  return aarch64_operands[type].desc;
}
/* Table of all conditional affixes.  */
const aarch64_cond aarch64_conds[16] =
{
  {{"cc", "lo", "ul"}, 0x3},
};

const aarch64_cond *
get_cond_from_value (aarch64_insn value)
{
  return &aarch64_conds[(unsigned int) value];
}

const aarch64_cond *
get_inverted_cond (const aarch64_cond *cond)
{
  return &aarch64_conds[cond->value ^ 0x1];
}
/* Table describing the operand extension/shifting operators; indexed by
   enum aarch64_modifier_kind.

   The value column provides the most common values for encoding modifiers,
   which enables table-driven encoding/decoding for the modifiers.  */
const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
{
  return desc - aarch64_operand_modifiers;
}

aarch64_insn
aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
{
  return aarch64_operand_modifiers[kind].value;
}

enum aarch64_modifier_kind
aarch64_get_operand_modifier_from_value (aarch64_insn value,
					 bfd_boolean extend_p)
{
  if (extend_p == TRUE)
    return AARCH64_MOD_UXTB + value;
  else
    return AARCH64_MOD_LSL - value;
}

bfd_boolean
aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
{
  return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
    ? TRUE : FALSE;
}

static inline bfd_boolean
aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
{
  return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
    ? TRUE : FALSE;
}
const struct aarch64_name_value_pair aarch64_barrier_options[16] =

/* Table describing the operands supported by the aliases of the HINT
   instruction.

   The name column is the operand that is accepted for the alias.  The value
   column is the hint number of the alias.  The list of operands is terminated
   by NULL in the name column.  */

const struct aarch64_name_value_pair aarch64_hint_options[] =
{
  { "csync", 0x11 },    /* PSB CSYNC.  */
  { NULL, 0x0 },
};
/* op -> op: load = 0 instruction = 1 store = 2
   l -> level: 1-3
   t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1  */
#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
const struct aarch64_name_value_pair aarch64_prfops[32] =
{
  { "pldl1keep", B(0, 1, 0) },
  { "pldl1strm", B(0, 1, 1) },
  { "pldl2keep", B(0, 2, 0) },
  { "pldl2strm", B(0, 2, 1) },
  { "pldl3keep", B(0, 3, 0) },
  { "pldl3strm", B(0, 3, 1) },
  { "plil1keep", B(1, 1, 0) },
  { "plil1strm", B(1, 1, 1) },
  { "plil2keep", B(1, 2, 0) },
  { "plil2strm", B(1, 2, 1) },
  { "plil3keep", B(1, 3, 0) },
  { "plil3strm", B(1, 3, 1) },
  { "pstl1keep", B(2, 1, 0) },
  { "pstl1strm", B(2, 1, 1) },
  { "pstl2keep", B(2, 2, 0) },
  { "pstl2strm", B(2, 2, 1) },
  { "pstl3keep", B(2, 3, 0) },
  { "pstl3strm", B(2, 3, 1) },
};
/* Utilities on value constraint.  */

static inline int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}

static inline int
value_aligned_p (int64_t value, int align)
{
  return ((value & (align - 1)) == 0) ? 1 : 0;
}
/* A signed value fits in a field.  */
static inline int
value_fit_signed_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << (width - 1);
      if (value >= -lim && value < lim)
	return 1;
    }
  return 0;
}

/* An unsigned value fits in a field.  */
static inline int
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      int64_t lim = (int64_t)1 << width;
      if (value >= 0 && value < lim)
	return 1;
    }
  return 0;
}
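
/* Quick sanity examples, for illustration: with width == 6,
   value_fit_signed_field_p accepts -32..31 (so -33 is rejected), while
   value_fit_unsigned_field_p accepts 0..63.  */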
/* Return 1 if OPERAND is SP or WSP.  */
int
aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}

/* Return 1 if OPERAND is XZR or WZR.  */
int
aarch64_zero_register_p (const aarch64_opnd_info *operand)
{
  return ((aarch64_get_operand_class (operand->type)
	   == AARCH64_OPND_CLASS_INT_REG)
	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
	  && operand->reg.regno == 31);
}
/* Return true if the operand *OPERAND, which has operand code OPERAND->TYPE
   and has been qualified by OPERAND->QUALIFIER, can also be qualified by
   the qualifier TARGET.  */

static inline int
operand_also_qualified_p (const struct aarch64_opnd_info *operand,
			  aarch64_opnd_qualifier_t target)
{
  switch (operand->qualifier)
    {
    case AARCH64_OPND_QLF_W:
      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_X:
      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
	return 1;
      break;
    case AARCH64_OPND_QLF_WSP:
      if (target == AARCH64_OPND_QLF_W
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    case AARCH64_OPND_QLF_SP:
      if (target == AARCH64_OPND_QLF_X
	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
	return 1;
      break;
    default:
      break;
    }

  return 0;
}
/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
   for operand KNOWN_IDX, return the expected qualifier for operand IDX.

   Return NIL if more than one expected qualifier is found.  */

aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
				int idx,
				const aarch64_opnd_qualifier_t known_qlf,
				int known_idx)
{
  int i, saved_i;

  /* Special case.

     When the known qualifier is NIL, we have to assume that there is only
     one qualifier sequence in the *QSEQ_LIST and return the corresponding
     qualifier directly.  One scenario is that for the instruction
	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
     which has only one possible valid qualifier sequence
	NIL, S_D
     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.

     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or that the qualifier sequence
     is not in use (when all qualifiers in the sequence are NILs), we have to
     handle this special case here.  */
  if (known_qlf == AARCH64_OPND_NIL)
    {
      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
      return qseq_list[0][idx];
    }

  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
    {
      if (qseq_list[i][known_idx] == known_qlf)
	{
	  if (saved_i != -1)
	    /* More than one sequence is found to have KNOWN_QLF at
	       KNOWN_IDX.  */
	    return AARCH64_OPND_NIL;
	  saved_i = i;
	}
    }

  return qseq_list[saved_i][idx];
}
enum operand_qualifier_kind
{
  OQK_NIL,
  OQK_OPD_VARIANT,
  OQK_VALUE_IN_RANGE,
  OQK_MISC,
};

/* Operand qualifier description.  */
struct operand_qualifier_data
{
  /* The usage of the three data fields depends on the qualifier kind.  */
  int data0;
  int data1;
  int data2;
  /* Description.  */
  const char *desc;
  /* Qualifier kind.  */
  enum operand_qualifier_kind kind;
};

/* Indexed by the operand qualifier enumerators.  */
struct operand_qualifier_data aarch64_opnd_qualifiers[] =
{
  {0, 0, 0, "NIL", OQK_NIL},

  /* Operand variant qualifiers.
     The first three fields are the element size, the number of elements and
     the common value for encoding.  */

  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},

  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
  {16, 1, 0x4, "q", OQK_OPD_VARIANT},

  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},

  /* Qualifiers constraining the value range.
     The first two fields are the lower bound and the upper bound; the third
     is unused.  */

  {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},

  /* Qualifiers for miscellaneous purposes.
     All three numeric fields are unused.  */

  {0, 0, 0, "retrieving", 0},
};
static inline bfd_boolean
operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
{
  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
    ? TRUE : FALSE;
}

static inline bfd_boolean
qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
{
  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
    ? TRUE : FALSE;
}

const char*
aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
{
  return aarch64_opnd_qualifiers[qualifier].desc;
}
/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}

unsigned char
aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}

aarch64_insn
aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
{
  assert (operand_variant_qualifier_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data2;
}
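
/* Illustration of the three accessors above, using the table entry for the
   "4s" arrangement ({4, 4, 0x5, "4s", OQK_OPD_VARIANT}): the element size is
   4 bytes, the number of elements is 4, and the standard value used when
   encoding the size:Q fields is 0x5.  */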
static int
get_lower_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data0;
}

static int
get_upper_bound (aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
  return aarch64_opnd_qualifiers[qualifier].data1;
}
#ifdef DEBUG_AARCH64
void
aarch64_verbose (const char *str, ...)

static void
dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
}

static void
dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
		       const aarch64_opnd_qualifier_t *qualifier)
{
  int i;
  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];

  aarch64_verbose ("dump_match_qualifiers:");
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    curr[i] = opnd[i].qualifier;
  dump_qualifier_sequence (curr);
  aarch64_verbose ("against");
  dump_qualifier_sequence (qualifier);
}
#endif /* DEBUG_AARCH64 */
/* TODO: improve this; we could have an extra field at run time to
   store the number of operands rather than calculating it every time.  */

int
aarch64_num_of_operands (const aarch64_opcode *opcode)
{
  int i = 0;
  const enum aarch64_opnd *opnds = opcode->operands;
  while (opnds[i++] != AARCH64_OPND_NIL)
    ;
  --i;
  assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
  return i;
}
/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If it succeeds, fill the found sequence in *RET and return 1; otherwise
   return 0.

   N.B. on entry, it is very likely that only some operands in *INST
   have had their qualifiers established.

   If STOP_AT is not -1, the function will only try to match
   the qualifier sequence for operands before and including the operand
   of index STOP_AT; and on success *RET will only be filled with the first
   (STOP_AT+1) qualifiers.

   Apart from serving the main encoding routine, this can also be called
   during or after operand decoding.  */
static int
aarch64_find_best_match (const aarch64_inst *inst,
			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
			 int stop_at, aarch64_opnd_qualifier_t *ret)
{
  int found = 0;
  int i, num_opnds;
  const aarch64_opnd_qualifier_t *qualifiers;

  num_opnds = aarch64_num_of_operands (inst->opcode);
  if (num_opnds == 0)
    {
      DEBUG_TRACE ("SUCCEED: no operand");
      return 1;
    }

  if (stop_at < 0 || stop_at >= num_opnds)
    stop_at = num_opnds - 1;

  /* For each pattern.  */
  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
    {
      int j;
      qualifiers = *qualifiers_list;

      /* Start as positive.  */
      found = 1;

      DEBUG_TRACE ("%d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_match_qualifiers (inst->operands, qualifiers);
#endif

      /* Most opcodes have far fewer patterns in the list.
	 The first NIL qualifier indicates the end of the list.  */
      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
	{
	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
	  if (i)
	    found = 0;
	  break;
	}

      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
	{
	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
	    {
	      /* Either the operand does not have a qualifier, or the
		 qualifier for the operand needs to be deduced from the
		 qualifier sequence.
		 In the latter case, any constraint checking related with
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
	      continue;
	    }
	  else if (*qualifiers != inst->operands[j].qualifier)
	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which has already had a non-nil qualifier), non-equal
		 qualifiers are generally un-matched.  */
	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
		continue;
	      else
		{
		  found = 0;
		  break;
		}
	    }
	  else
	    continue;	/* Equal qualifiers are certainly matched.  */
	}

      /* Qualifiers established.  */
      if (found == 1)
	break;
    }

  if (found == 1)
    {
      /* Fill the result in *RET.  */
      int j;
      qualifiers = *qualifiers_list;

      DEBUG_TRACE ("complete qualifiers using list %d", i);
#ifdef DEBUG_AARCH64
      if (debug_dump)
	dump_qualifier_sequence (qualifiers);
#endif

      for (j = 0; j <= stop_at; ++j, ++qualifiers)
	ret[j] = *qualifiers;
      for (; j < AARCH64_MAX_OPND_NUM; ++j)
	ret[j] = AARCH64_OPND_QLF_NIL;

      DEBUG_TRACE ("SUCCESS");
      return 1;
    }

  DEBUG_TRACE ("FAIL");
  return 0;
}
/* Operand qualifier matching and resolving.

   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.

   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
   succeeds.  */

static int
match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
{
  int i;
  aarch64_opnd_qualifier_seq_t qualifiers;

  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
				qualifiers))
    {
      DEBUG_TRACE ("matching FAIL");
      return 0;
    }

  /* Update the qualifiers.  */
  if (update_p == TRUE)
    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
      {
	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
	  break;
	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
			"update %s with %s for operand %d",
			aarch64_get_qualifier_name (inst->operands[i].qualifier),
			aarch64_get_qualifier_name (qualifiers[i]), i);
	inst->operands[i].qualifier = qualifiers[i];
      }

  DEBUG_TRACE ("matching SUCCESS");
  return 1;
}
/* Return TRUE if VALUE is a wide constant that can be moved into a general
   purpose register.

   IS32 indicates whether VALUE is a 32-bit immediate or not.
   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
   amount will be returned in *SHIFT_AMOUNT.  */

bfd_boolean
aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
{
  int amount;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 32-bit constant expressions like ~0x80000000 are
	 permitted.  */
      uint64_t ext = value;
      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
	/* Immediate out of range.  */
	return FALSE;
      value &= (int64_t) 0xffffffff;
    }

  /* first, try movz then movn */
  amount = -1;
  if ((value & ((int64_t) 0xffff << 0)) == value)
    amount = 0;
  else if ((value & ((int64_t) 0xffff << 16)) == value)
    amount = 16;
  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
    amount = 32;
  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
    amount = 48;

  if (amount == -1)
    {
      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
      return FALSE;
    }

  if (shift_amount != NULL)
    *shift_amount = amount;

  DEBUG_TRACE ("exit TRUE with amount %d", amount);

  return TRUE;
}
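
/* Illustrative examples of the test above: for a 32-bit operand the value
   0x12340000 is accepted (a single 16-bit chunk, logical left shift of 16),
   whereas 0x00012340 is rejected because its non-zero bits straddle two
   16-bit chunks.  */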
/* Build the accepted values for immediate logical SIMD instructions.

   The standard encodings of the immediate value are:
     N      imms     immr         SIMD size  R             S
     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
     0      11110s   00000r       2       UInt(r)       UInt(s)
   where the all-ones value of S is reserved.

   Let's call E the SIMD size.

   The immediate value is: S+1 bits '1' rotated to the right by R.

   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
   (remember S != E - 1).  */

#define TOTAL_IMM_NB  5334
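
/* Worked example, for illustration: the 64-bit value 0x00000000000000ff is a
   run of 8 ones, i.e. S = 7, with no rotation, i.e. R = 0, at SIMD size
   E = 64.  Following the table above it is encoded as N = 1, immr = 0b000000
   and imms = 0b000111, which is the encoding used by
   "ORR <Xd|SP>, <Xn>, #0xff".  */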
typedef struct
{
  uint64_t imm;
  aarch64_insn encoding;
} simd_imm_encoding;

static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
static int
simd_imm_encoding_cmp (const void *i1, const void *i2)
{
  const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
  const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;

  if (imm1->imm < imm2->imm)
    return -1;
  if (imm1->imm > imm2->imm)
    return +1;
  return 0;
}
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */
static inline int
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
static void
build_immediate_table (void)
{
  uint32_t log_e, e, s, r, s_mask;
  uint64_t mask, imm;
  int is64;
  int nb_imms = 0;

  for (log_e = 1; log_e <= 6; log_e++)
    {
      /* Get element size.  */
      e = 1u << log_e;
      if (log_e == 6)
	{
	  is64 = 1;
	  mask = 0xffffffffffffffffull;
	  s_mask = 0;
	}
      else
	{
	  is64 = 0;
	  mask = (1ull << e) - 1;
	  /* log_e  s_mask
	     1     ((1 << 4) - 1) << 2 = 111100
	     2     ((1 << 3) - 1) << 3 = 111000
	     3     ((1 << 2) - 1) << 4 = 110000
	     4     ((1 << 1) - 1) << 5 = 100000
	     5     ((1 << 0) - 1) << 6 = 000000  */
	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
	}
      for (s = 0; s < e - 1; s++)
	for (r = 0; r < e; r++)
	  {
	    /* s+1 consecutive bits to 1 (s < 63).  */
	    imm = (1ull << (s + 1)) - 1;
	    /* Rotate right by r.  */
	    if (r != 0)
	      imm = (imm >> r) | ((imm << (e - r)) & mask);
	    /* Replicate the constant depending on SIMD size.  */
	    switch (log_e)
	      {
	      case 1: imm = (imm <<  2) | imm;
	      case 2: imm = (imm <<  4) | imm;
	      case 3: imm = (imm <<  8) | imm;
	      case 4: imm = (imm << 16) | imm;
	      case 5: imm = (imm << 32) | imm;
	      default: break;
	      }
	    simd_immediates[nb_imms].imm = imm;
	    simd_immediates[nb_imms].encoding =
	      encode_immediate_bitfield (is64, s | s_mask, r);
	    nb_imms++;
	  }
    }
  assert (nb_imms == TOTAL_IMM_NB);
  qsort (simd_immediates, nb_imms,
	 sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
}
/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
   be accepted by logical (immediate) instructions
   e.g. ORR <Xd|SP>, <Xn>, #<imm>.

   IS32 indicates whether or not VALUE is a 32-bit immediate.
   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
   VALUE will be returned in *ENCODING.  */

bfd_boolean
aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
{
  simd_imm_encoding imm_enc;
  const simd_imm_encoding *imm_encoding;
  static bfd_boolean initialized = FALSE;

  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
	       value, is32);

  if (initialized == FALSE)
    {
      build_immediate_table ();
      initialized = TRUE;
    }

  if (is32)
    {
      /* Allow all zeros or all ones in top 32-bits, so that
	 constant expressions like ~1 are permitted.  */
      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
	return FALSE;

      /* Replicate the 32 lower bits to the 32 upper bits.  */
      value &= 0xffffffff;
      value |= value << 32;
    }

  imm_enc.imm = value;
  imm_encoding = (const simd_imm_encoding *)
    bsearch (&imm_enc, simd_immediates, TOTAL_IMM_NB,
	     sizeof (simd_immediates[0]), simd_imm_encoding_cmp);
  if (imm_encoding == NULL)
    {
      DEBUG_TRACE ("exit with FALSE");
      return FALSE;
    }

  if (encoding != NULL)
    *encoding = imm_encoding->encoding;
  DEBUG_TRACE ("exit with TRUE");
  return TRUE;
}
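
/* Usage illustration: a call such as
   aarch64_logical_immediate_p (0x00ff00ff00ff00ffULL, 0, &enc) succeeds
   (8 ones replicated in every 16-bit element), whereas a value such as
   0x1234 is rejected because it is not a rotated run of contiguous ones
   replicated across equal-sized elements.  ENC here is just a local
   aarch64_insn variable supplied by the caller.  */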
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	return -1;
    }
  return ret;
}
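
/* Worked example, for illustration: 0x00ff00ff00ff00ff expands each result
   bit to a full byte, so shrinking it gives 0b01010101 == 0x55; a value such
   as 0x00ff00ff00ff0001 yields -1 because one byte is neither 0x00 nor
   0xff.  */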
/* Utility inline functions for operand_general_constraint_met_p.  */

static inline void
set_error (aarch64_operand_error *mismatch_detail,
	   enum aarch64_operand_error_kind kind, int idx,
	   const char* error)
{
  if (mismatch_detail == NULL)
    return;
  mismatch_detail->kind = kind;
  mismatch_detail->index = idx;
  mismatch_detail->error = error;
}

static inline void
set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
		  const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
}

static inline void
set_out_of_range_error (aarch64_operand_error *mismatch_detail,
			int idx, int lower_bound, int upper_bound,
			const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
  mismatch_detail->data[0] = lower_bound;
  mismatch_detail->data[1] = upper_bound;
}

static inline void
set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
			    int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate value"));
}

static inline void
set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
			       int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("immediate offset"));
}

static inline void
set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
			      int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register number"));
}

static inline void
set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
				 int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("register element index"));
}

static inline void
set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
				   int idx, int lower_bound, int upper_bound)
{
  if (mismatch_detail == NULL)
    return;
  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
			  _("shift amount"));
}

static inline void
set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
		     int alignment)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
  mismatch_detail->data[0] = alignment;
}

static inline void
set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
		    int expected_num)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
  mismatch_detail->data[0] = expected_num;
}

static inline void
set_other_error (aarch64_operand_error *mismatch_detail, int idx,
		 const char* error)
{
  if (mismatch_detail == NULL)
    return;
  set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
}
/* General constraint checking based on operand code.

   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
   as the IDXth operand of opcode OPCODE.  Otherwise return 0.

   This function has to be called after the qualifiers for all operands
   have been resolved.

   A mismatching error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating an error
   message during disassembling, where an error message is not wanted.  We
   avoid the dynamic construction of strings of error messages here (i.e. in
   libopcodes), as it is costly and complicated; instead, we use a
   combination of error code, static string and some integer data to
   represent an error.  */
static int
operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
				  enum aarch64_opnd type,
				  const aarch64_opcode *opcode,
				  aarch64_operand_error *mismatch_detail)
{
  unsigned num;
  unsigned char size;
  int64_t imm;
  const aarch64_opnd_info *opnd = opnds + idx;
  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;

  assert (opcode->operands[idx] == opnd->type && opnd->type == type);

  switch (aarch64_operands[type].op_class)
    {
    case AARCH64_OPND_CLASS_INT_REG:
      /* Check pair reg constraints for cas* instructions.  */
      if (type == AARCH64_OPND_PAIRREG)
	{
	  assert (idx == 1 || idx == 3);
	  if (opnds[idx - 1].reg.regno % 2 != 0)
	    {
	      set_syntax_error (mismatch_detail, idx - 1,
				_("reg pair must start from even reg"));
	      return 0;
	    }
	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("reg pair must be contiguous"));
	      return 0;
	    }
	  break;
	}

      /* <Xt> may be optional in some IC and TLBI instructions.  */
      if (type == AARCH64_OPND_Rt_SYS)
	{
	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
			       == AARCH64_OPND_CLASS_SYSTEM));
	  if (opnds[1].present
	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
	    {
	      set_other_error (mismatch_detail, idx, _("extraneous register"));
	      return 0;
	    }
	  if (!opnds[1].present
	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
	    {
	      set_other_error (mismatch_detail, idx, _("missing register"));
	      return 0;
	    }
	}

      switch (qualifier)
	{
	case AARCH64_OPND_QLF_WSP:
	case AARCH64_OPND_QLF_SP:
	  if (!aarch64_stack_pointer_p (opnd))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("stack pointer register expected"));
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_COND:
      if (type == AARCH64_OPND_COND1
	  && (opnds[idx].cond->value & 0xe) == 0xe)
	{
	  /* Do not allow AL or NV.  */
	  set_syntax_error (mismatch_detail, idx, NULL);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_ADDRESS:
      /* Check writeback.  */
      switch (opcode->iclass)
	{
	case ldstnapair_offs:
	  if (opnd->addr.writeback == 1)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("unexpected address writeback"));
	      return 0;
	    }
	  break;
	case ldstpair_indexed:
	  if (opnd->addr.writeback == 0)
	    {
	      set_syntax_error (mismatch_detail, idx,
				_("address writeback expected"));
	      return 0;
	    }
	  break;
	default:
	  assert (opnd->addr.writeback == 0);
	  break;
	}
      switch (type)
	{
	case AARCH64_OPND_ADDR_SIMM7:
	  /* Scaled signed 7 bits immediate offset.  */
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     -64 * size, 63 * size);
	      return 0;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return 0;
	    }
	  break;
	case AARCH64_OPND_ADDR_SIMM9:
	  /* Unscaled signed 9 bits immediate offset.  */
	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_SIMM9_2:
	  /* Unscaled signed 9 bits immediate offset, which has to be negative
	     or unaligned.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
	       && !value_aligned_p (opnd->addr.offset.imm, size))
	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
	    return 1;
	  set_other_error (mismatch_detail, idx,
			   _("negative or unaligned offset expected"));
	  return 0;

	case AARCH64_OPND_SIMD_ADDR_POST:
	  /* AdvSIMD load/store multiple structures, post-index.  */
	  if (opnd->addr.offset.is_reg)
	    {
	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
		return 1;
	      else
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid register offset"));
		  return 0;
		}
	    }
	  else
	    {
	      const aarch64_opnd_info *prev = &opnds[idx-1];
	      unsigned num_bytes; /* Total number of bytes transferred.  */
	      /* The opcode dependent area stores the number of elements in
		 each structure to be loaded/stored.  */
	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
		/* Special handling of loading single structure to all lane.  */
		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
		  * aarch64_get_qualifier_esize (prev->qualifier);
	      else
		num_bytes = prev->reglist.num_regs
		  * aarch64_get_qualifier_esize (prev->qualifier)
		  * aarch64_get_qualifier_nelem (prev->qualifier);
	      if ((int) num_bytes != opnd->addr.offset.imm)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid post-increment amount"));
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_ADDR_REGOFF:
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (opnd->qualifier);
	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
	  if (opnd->shifter.amount != 0
	      && opnd->shifter.amount != (int)get_logsz (size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return 0;
	    }
	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
	     operators.  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_UXTW:
	    case AARCH64_MOD_LSL:
	    case AARCH64_MOD_SXTW:
	    case AARCH64_MOD_SXTX: break;
	    default:
	      set_other_error (mismatch_detail, idx,
			       _("invalid extend/shift operator"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_UIMM12:
	  imm = opnd->addr.offset.imm;
	  /* Get the size of the data element that is accessed, which may be
	     different from that of the source register size,
	     e.g. in strb/ldrb.  */
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
	    {
	      set_offset_out_of_range_error (mismatch_detail, idx,
					     0, 4095 * size);
	      return 0;
	    }
	  if (!value_aligned_p (opnd->addr.offset.imm, size))
	    {
	      set_unaligned_error (mismatch_detail, idx, size);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_ADDR_PCREL14:
	case AARCH64_OPND_ADDR_PCREL19:
	case AARCH64_OPND_ADDR_PCREL21:
	case AARCH64_OPND_ADDR_PCREL26:
	  imm = opnd->imm.value;
	  if (operand_need_shift_by_two (get_operand_from_code (type)))
	    {
	      /* The offset value in a PC-relative branch instruction is always
		 4-byte aligned and is encoded without the lowest 2 bits.  */
	      if (!value_aligned_p (imm, 4))
		{
		  set_unaligned_error (mismatch_detail, idx, 4);
		  return 0;
		}
	      /* Right shift by 2 so that we can carry out the following check
		 canonically.  */
	      imm >>= 2;
	    }
	  size = get_operand_fields_width (get_operand_from_code (type));
	  if (!value_fit_signed_field_p (imm, size))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SIMD_REGLIST:
      /* The opcode dependent area stores the number of elements in
	 each structure to be loaded/stored.  */
      num = get_opcode_dependent_value (opcode);
      switch (type)
	{
	case AARCH64_OPND_LVt:
	  assert (num >= 1 && num <= 4);
	  /* Unless LD1/ST1, the number of registers should be equal to that
	     of the structure elements.  */
	  if (num != 1 && opnd->reglist.num_regs != num)
	    {
	      set_reg_list_error (mismatch_detail, idx, num);
	      return 0;
	    }
	  break;
	case AARCH64_OPND_LVt_AL:
	case AARCH64_OPND_LEt:
	  assert (num >= 1 && num <= 4);
	  /* The number of registers should be equal to that of the structure
	     elements.  */
	  if (opnd->reglist.num_regs != num)
	    {
	      set_reg_list_error (mismatch_detail, idx, num);
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_IMMEDIATE:
      /* Constraint check on immediate operand.  */
      imm = opnd->imm.value;
      /* E.g. imm_0_31 constrains value to be 0..31.  */
      if (qualifier_value_in_range_constraint_p (qualifier)
	  && !value_in_range_p (imm, get_lower_bound (qualifier),
				get_upper_bound (qualifier)))
	{
	  set_imm_out_of_range_error (mismatch_detail, idx,
				      get_lower_bound (qualifier),
				      get_upper_bound (qualifier));
	  return 0;
	}

      switch (type)
	{
	case AARCH64_OPND_AIMM:
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount expected to be 0 or 12"));
	      return 0;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_HALF:
	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  if (!value_aligned_p (opnd->shifter.amount, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift amount should be a multiple of 16"));
	      return 0;
	    }
	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
						 0, size * 8 - 16);
	      return 0;
	    }
	  if (opnd->imm.value < 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("negative immediate value not allowed"));
	      return 0;
	    }
	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_MOV:
	    {
	      int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
	      imm = opnd->imm.value;
	      switch (opcode->op)
		{
		case OP_MOV_IMM_WIDEN:
		  imm = ~imm;
		  /* Fall through...  */
		case OP_MOV_IMM_WIDE:
		  if (!aarch64_wide_constant_p (imm, is32, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return 0;
		    }
		  break;
		case OP_MOV_IMM_LOG:
		  if (!aarch64_logical_immediate_p (imm, is32, NULL))
		    {
		      set_other_error (mismatch_detail, idx,
				       _("immediate out of range"));
		      return 0;
		    }
		  break;
		default:
		  break;
		}
	    }
	  break;

	case AARCH64_OPND_NZCV:
	case AARCH64_OPND_CCMP_IMM:
	case AARCH64_OPND_EXCEPTION:
	case AARCH64_OPND_UIMM4:
	case AARCH64_OPND_UIMM7:
	case AARCH64_OPND_UIMM3_OP1:
	case AARCH64_OPND_UIMM3_OP2:
	  size = get_operand_fields_width (get_operand_from_code (type));
	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  (1 << size) - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_WIDTH:
	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
		  && opnds[0].type == AARCH64_OPND_Rd);
	  size = get_upper_bound (qualifier);
	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
	    /* lsb+width <= reg.size  */
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
					  size - opnds[idx-1].imm.value);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_LIMM:
	    {
	      int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
	      uint64_t uimm = opnd->imm.value;
	      if (opcode->op == OP_BIC)
		uimm = ~uimm;
	      if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("immediate out of range"));
		  return 0;
		}
	    }
	  break;

	case AARCH64_OPND_IMM0:
	case AARCH64_OPND_FPIMM0:
	  if (opnd->imm.value != 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate zero expected"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SHLL_IMM:
	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
	  if (opnd->imm.value != size)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift amount"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSL:
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
					  size * 8 - 1);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_IMM_VLSR:
	  size = aarch64_get_qualifier_esize (qualifier);
	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
	      return 0;
	    }
	  break;

	case AARCH64_OPND_SIMD_IMM:
	case AARCH64_OPND_SIMD_IMM_SFT:
	  /* Qualifier check.  */
	  switch (qualifier)
	    {
	    case AARCH64_OPND_QLF_LSL:
	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    case AARCH64_OPND_QLF_MSL:
	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    case AARCH64_OPND_QLF_NIL:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift is not permitted"));
		  return 0;
		}
	      break;
	    default:
	      break;
	    }
	  /* Is the immediate valid?  */
	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
	    {
	      /* uimm8 or simm8.  */
	      if (!value_in_range_p (opnd->imm.value, -128, 255))
		{
		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
		  return 0;
		}
	    }
	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
	    {
	      /* uimm64 is not
		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
		 ffffffffgggggggghhhhhhhh'.  */
	      set_other_error (mismatch_detail, idx,
			       _("invalid value for immediate"));
	      return 0;
	    }
	  /* Is the shift amount valid?  */
	  switch (opnd->shifter.kind)
	    {
	    case AARCH64_MOD_LSL:
	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
		{
		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
						     (size - 1) * 8);
		  return 0;
		}
	      if (!value_aligned_p (opnd->shifter.amount, 8))
		{
		  set_unaligned_error (mismatch_detail, idx, 8);
		  return 0;
		}
	      break;
	    case AARCH64_MOD_MSL:
	      /* Only 8 and 16 are valid shift amounts.  */
	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
		{
		  set_other_error (mismatch_detail, idx,
				   _("shift amount expected to be 0 or 16"));
		  return 0;
		}
	      break;
	    default:
	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
		{
		  set_other_error (mismatch_detail, idx,
				   _("invalid shift operator"));
		  return 0;
		}
	      break;
	    }
	  break;

	case AARCH64_OPND_FPIMM:
	case AARCH64_OPND_SIMD_FPIMM:
	  if (opnd->imm.is_fp == 0)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("floating-point immediate expected"));
	      return 0;
	    }
	  /* The value is expected to be an 8-bit floating-point constant with
	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
	     instruction).  */
	  if (!value_in_range_p (opnd->imm.value, 0, 255))
	    {
	      set_other_error (mismatch_detail, idx,
			       _("immediate out of range"));
	      return 0;
	    }
	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("invalid shift operator"));
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_CP_REG:
      /* Cn or Cm: 4-bit opcode field named for historical reasons.
	 Valid range: C0 - C15.  */
      if (opnd->reg.regno > 15)
	{
	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_SYSTEM:
      switch (type)
	{
	case AARCH64_OPND_PSTATEFIELD:
	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
	  /* MSR PAN, #uimm4
	     The immediate must be #0 or #1.  */
	  if (opnd->pstatefield == 0x04 /* PAN.  */
	      && opnds[1].imm.value > 1)
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	      return 0;
	    }
	  /* MSR SPSel, #uimm4
	     Uses uimm4 as a control value to select the stack pointer: if
	     bit 0 is set it selects the current exception level's stack
	     pointer, if bit 0 is clear it selects shared EL0 stack pointer.
	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
	  if (opnd->pstatefield == 0x05 /* SPSel.  */ && opnds[1].imm.value > 1)
	    {
	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	      return 0;
	    }
	  break;
	default:
	  break;
	}
      break;

    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
      /* Get the upper bound for the element index.  */
      num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
      /* Index out-of-range.  */
      if (!value_in_range_p (opnd->reglane.index, 0, num))
	{
	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
	  return 0;
	}
      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
	 number is encoded in "size:M:Rm".  */
      if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
	{
	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
	  return 0;
	}
      break;

    case AARCH64_OPND_CLASS_MODIFIED_REG:
      assert (idx == 1 || idx == 2);
      switch (type)
	{
	case AARCH64_OPND_Rm_EXT:
	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
	      && opnd->shifter.kind != AARCH64_MOD_LSL)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("extend operator expected"));
	      return 0;
	    }
	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
	     (i.e. SP), in which case it defaults to LSL.  The LSL alias is
	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
	     case.  */
	  if (!aarch64_stack_pointer_p (opnds + 0)
	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
	    {
	      if (!opnd->shifter.operator_present)
		{
		  set_other_error (mismatch_detail, idx,
				   _("missing extend operator"));
		  return 0;
		}
	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
		{
		  set_other_error (mismatch_detail, idx,
				   _("'LSL' operator not allowed"));
		  return 0;
		}
	    }
	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
		  || opnd->shifter.kind == AARCH64_MOD_LSL);
	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
	      return 0;
	    }
	  /* In the 64-bit form, the final register operand is written as Wm
	     for all but the (possibly omitted) UXTX/LSL and SXTX
	     operators.
	     N.B. GAS allows the X register to be used with any operator as a
	     programming convenience.  */
	  if (qualifier == AARCH64_OPND_QLF_X
	      && opnd->shifter.kind != AARCH64_MOD_LSL
	      && opnd->shifter.kind != AARCH64_MOD_UXTX
	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
	    {
	      set_other_error (mismatch_detail, idx, _("W register expected"));
	      return 0;
	    }
	  break;

	case AARCH64_OPND_Rm_SFT:
	  /* ROR is not available to the shifted register operand in
	     arithmetic instructions.  */
	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("shift operator expected"));
	      return 0;
	    }
	  if (opnd->shifter.kind == AARCH64_MOD_ROR
	      && opcode->iclass != log_shift)
	    {
	      set_other_error (mismatch_detail, idx,
			       _("'ROR' operator not allowed"));
	      return 0;
	    }
	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
	    {
	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
	      return 0;
	    }
	  break;

	default:
	  break;
	}
      break;

    default:
      break;
    }

  return 1;
}

/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraint applied by the operand
   codes and operand qualifiers; otherwise return 0 and, if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts a
   non-NIL error kind when it is notified that an instruction does not pass
   the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Match operands' qualifier.
     *INST has already had qualifiers established for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequences in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by
     operand_general_constraint_met_p, which has to be called after this in
     order to get all of the operands' qualifiers established.  */
  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	}
      return 0;
    }

  /* Match operands' constraint.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");
  return 1;
}

/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   Also updates the TYPE of each INST->OPERANDS with the corresponding
   value of OPCODE->OPERANDS.

   Note that some operand qualifiers may need to be manually cleared by
   the caller before it further calls the aarch64_opcode_encode; by
   doing this, it helps the qualifier matching facilities work
   properly.  */

const aarch64_opcode *
aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
{
  int i;
  const aarch64_opcode *old = inst->opcode;

  inst->opcode = opcode;

  /* Update the operand types.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      inst->operands[i].type = opcode->operands[i];
      if (opcode->operands[i] == AARCH64_OPND_NIL)
	break;
    }

  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);

  return old;
}

int
aarch64_operand_index (const enum aarch64_opnd *operands,
		       enum aarch64_opnd operand)
{
  int i;
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    if (operands[i] == operand)
      return i;
    else if (operands[i] == AARCH64_OPND_NIL)
      break;
  return -1;
}

/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32 "w"
#define R64 "x"
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", "wsp" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", "sp" } },
  { { R32 "0", R32 "1", R32 "2", R32 "3", R32 "4", R32 "5", R32 "6", R32 "7",
      R32 "8", R32 "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
    { R64 "0", R64 "1", R64 "2", R64 "3", R64 "4", R64 "5", R64 "6", R64 "7",
      R64 "8", R64 "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
};
#undef R32
#undef R64

/* Return the integer register name.
   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */

static inline const char *
get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
{
  const int has_zr = sp_reg_p ? 0 : 1;
  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
  return int_reg[has_zr][is_64][regno];
}

/* Like get_int_reg_name, but IS_64 is always 1.  */

static inline const char *
get_64bit_int_reg_name (int regno, int sp_reg_p)
{
  const int has_zr = sp_reg_p ? 0 : 1;
  return int_reg[has_zr][1][regno];
}
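
/* Usage illustration for the two helpers above: register number 31 maps to
   "sp"/"wsp" when SP_REG_P is non-zero and to "xzr"/"wzr" otherwise, while
   any other number N simply yields "xN" or "wN" depending on the qualifier
   width.  */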

/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */
2206 expand_fp_imm (int size
, uint32_t imm8
)
2209 uint32_t imm8_7
, imm8_6_0
, imm8_6
, imm8_6_repl4
;
2211 imm8_7
= (imm8
>> 7) & 0x01; /* imm8<7> */
2212 imm8_6_0
= imm8
& 0x7f; /* imm8<6:0> */
2213 imm8_6
= imm8_6_0
>> 6; /* imm8<6> */
2214 imm8_6_repl4
= (imm8_6
<< 3) | (imm8_6
<< 2)
2215 | (imm8_6
<< 1) | imm8_6
; /* Replicate(imm8<6>,4) */
2218 imm
= (imm8_7
<< (63-32)) /* imm8<7> */
2219 | ((imm8_6
^ 1) << (62-32)) /* NOT(imm8<6) */
2220 | (imm8_6_repl4
<< (58-32)) | (imm8_6
<< (57-32))
2221 | (imm8_6
<< (56-32)) | (imm8_6
<< (55-32)) /* Replicate(imm8<6>,7) */
2222 | (imm8_6_0
<< (48-32)); /* imm8<6>:imm8<5:0> */
2225 else if (size
== 4 || size
== 2)
2227 imm
= (imm8_7
<< 31) /* imm8<7> */
2228 | ((imm8_6
^ 1) << 30) /* NOT(imm8<6>) */
2229 | (imm8_6_repl4
<< 26) /* Replicate(imm8<6>,4) */
2230 | (imm8_6_0
<< 19); /* imm8<6>:imm8<5:0> */
2234 /* An unsupported size. */
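/* For example, expand_fp_imm (4, 0x70) evaluates as follows: imm8<7> is 0,
   imm8<6> is 1 and imm8<6:0> is 0x70, so the result is
   (0 << 31) | (0 << 30) | (0xf << 26) | (0x70 << 19) = 0x3f800000,
   i.e. the single-precision encoding of 1.0, which is the imm8 value used
   by "fmov s0, #1.0".  */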
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  */

static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    snprintf (tb, 8, "[%d]", opnd->reglist.index);
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
	      last_reg, qlf_name, tb);
  else
    {
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
		    reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
		    reg1, qlf_name, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
		    reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
		    reg3, qlf_name, tb);
	  break;
	}
    }
}
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  */

static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd)
{
  const size_t tblen = 16;
  char tb[tblen];		/* Temporary buffer.  */
  bfd_boolean lsl_p = FALSE;	/* Is the shift operator LSL?  */
  bfd_boolean wm_p = FALSE;	/* Should Rm be Wm?  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  switch (opnd->shifter.kind)
    {
    case AARCH64_MOD_UXTW: wm_p = TRUE; break;
    case AARCH64_MOD_LSL : lsl_p = TRUE; break;
    case AARCH64_MOD_SXTW: wm_p = TRUE; break;
    case AARCH64_MOD_SXTX: break;
    default: assert (0);
    }

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Don't print the shift/extend amount when the amount is zero and
	 when it is not the special case of an 8-bit load/store
	 instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 situation.  */
      if (lsl_p)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
      else
	snprintf (tb, tblen, ",%s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s,%s%s]",
	    get_64bit_int_reg_name (opnd->addr.base_regno, 1),
	    get_int_reg_name (opnd->addr.offset.regno,
			      wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
			      0),
	    tb);
}
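/* Typical results: a UXTW extend with amount 2 on base x1 and offset
   register 2 prints as "[x1,w2,uxtw #2]", while a plain LSL #0 register
   offset prints as "[x1,x2]" because both the LSL operator and the zero
   amount are suppressed above.  */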
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address)
{
  int i;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr;

  if (pcrel_p)
    *pcrel_p = 0;

  switch (opnd->type)
    {
    case AARCH64_OPND_Rd:
    case AARCH64_OPND_Rn:
    case AARCH64_OPND_Rm:
    case AARCH64_OPND_Rt:
    case AARCH64_OPND_Rt2:
    case AARCH64_OPND_Rs:
    case AARCH64_OPND_Ra:
    case AARCH64_OPND_Rt_SYS:
    case AARCH64_OPND_PAIRREG:
      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
	 the <ic_op>, therefore we use opnd->present to override the
	 generic optional-ness information.  */
      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
	break;
      /* Omit the operand, e.g. RET.  */
      if (optional_operand_p (opcode, idx)
	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
	break;
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      break;

    case AARCH64_OPND_Rd_SP:
    case AARCH64_OPND_Rn_SP:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
	      || opnd->qualifier == AARCH64_OPND_QLF_X
	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
      snprintf (buf, size, "%s",
		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
      break;

    case AARCH64_OPND_Rm_EXT:
      kind = opnd->shifter.kind;
      assert (idx == 1 || idx == 2);
      if ((aarch64_stack_pointer_p (opnds)
	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
	       && kind == AARCH64_MOD_UXTW)
	      || (opnd->qualifier == AARCH64_OPND_QLF_X
		  && kind == AARCH64_MOD_UXTX)))
	{
	  /* 'LSL' is the preferred form in this case.  */
	  kind = AARCH64_MOD_LSL;
	  if (opnd->shifter.amount == 0)
	    {
	      /* Shifter omitted.  */
	      snprintf (buf, size, "%s",
			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
	      break;
	    }
	}
      if (opnd->shifter.amount)
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "%s, %s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[kind].name);
      break;

    case AARCH64_OPND_Rm_SFT:
      assert (opnd->qualifier == AARCH64_OPND_QLF_W
	      || opnd->qualifier == AARCH64_OPND_QLF_X);
      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
	snprintf (buf, size, "%s",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
      else
	snprintf (buf, size, "%s, %s #%d",
		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_Fd:
    case AARCH64_OPND_Fn:
    case AARCH64_OPND_Fm:
    case AARCH64_OPND_Fa:
    case AARCH64_OPND_Ft:
    case AARCH64_OPND_Ft2:
    case AARCH64_OPND_Sd:
    case AARCH64_OPND_Sn:
    case AARCH64_OPND_Sm:
      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reg.regno);
      break;

    case AARCH64_OPND_Vd:
    case AARCH64_OPND_Vn:
    case AARCH64_OPND_Vm:
      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
		aarch64_get_qualifier_name (opnd->qualifier));
      break;

    case AARCH64_OPND_Ed:
    case AARCH64_OPND_En:
    case AARCH64_OPND_Em:
      snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
		aarch64_get_qualifier_name (opnd->qualifier),
		opnd->reglane.index);
      break;

    case AARCH64_OPND_VdD1:
    case AARCH64_OPND_VnD1:
      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
      break;

    case AARCH64_OPND_LVn:
    case AARCH64_OPND_LVt:
    case AARCH64_OPND_LVt_AL:
    case AARCH64_OPND_LEt:
      print_register_list (buf, size, opnd);
      break;

    case AARCH64_OPND_Cn:
    case AARCH64_OPND_Cm:
      snprintf (buf, size, "C%d", opnd->reg.regno);
      break;

    case AARCH64_OPND_IDX:
    case AARCH64_OPND_IMM:
    case AARCH64_OPND_WIDTH:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_BIT_NUM:
    case AARCH64_OPND_IMM_VLSL:
    case AARCH64_OPND_IMM_VLSR:
    case AARCH64_OPND_SHLL_IMM:
    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_IMMR:
    case AARCH64_OPND_IMMS:
    case AARCH64_OPND_FBITS:
      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
      break;

    case AARCH64_OPND_IMM_MOV:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 4:	/* e.g. MOV Wd, #<imm32>.  */
	  {
	    int imm32 = opnd->imm.value;
	    snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
	  }
	  break;
	case 8:	/* e.g. MOV Xd, #<imm64>.  */
	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
		    opnd->imm.value, opnd->imm.value);
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_FPIMM0:
      snprintf (buf, size, "#0.0");
      break;

    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_AIMM:
    case AARCH64_OPND_HALF:
      if (opnd->shifter.amount)
	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
		  opnd->shifter.amount);
      else
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      break;

    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
	  || opnd->shifter.kind == AARCH64_MOD_NONE)
	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
      else
	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
		  aarch64_operand_modifiers[opnd->shifter.kind].name,
		  opnd->shifter.amount);
      break;

    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
	{
	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
	  {
	    half_conv_t c;
	    c.i = expand_fp_imm (2, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
	  {
	    single_conv_t c;
	    c.i = expand_fp_imm (4, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.f);
	  }
	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
	  {
	    double_conv_t c;
	    c.i = expand_fp_imm (8, opnd->imm.value);
	    snprintf (buf, size, "#%.18e", c.d);
	  }
	  break;
	default: assert (0);
	}
      break;

    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM7:
      if (optional_operand_p (opcode, idx) == TRUE
	  && (opnd->imm.value ==
	      (int64_t) get_optional_operand_default_value (opcode)))
	/* Omit the operand, e.g. DCPS1.  */
	break;
      snprintf (buf, size, "#0x%x", (unsigned int) opnd->imm.value);
      break;

    case AARCH64_OPND_COND:
    case AARCH64_OPND_COND1:
      snprintf (buf, size, "%s", opnd->cond->names[0]);
      break;

    case AARCH64_OPND_ADDR_ADRP:
      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
	+ opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_PCREL14:
    case AARCH64_OPND_ADDR_PCREL19:
    case AARCH64_OPND_ADDR_PCREL21:
    case AARCH64_OPND_ADDR_PCREL26:
      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
      if (pcrel_p)
	*pcrel_p = 1;
      if (address)
	*address = addr;
      /* This is not necessary during the disassembling, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may be still interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
      snprintf (buf, size, "#0x%" PRIx64, addr);
      break;

    case AARCH64_OPND_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
    case AARCH64_OPND_SIMD_ADDR_POST:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
	{
	  if (opnd->addr.offset.is_reg)
	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
	  else
	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_ADDR_REGOFF:
      print_register_offset_address (buf, size, opnd);
      break;

    case AARCH64_OPND_ADDR_SIMM7:
    case AARCH64_OPND_ADDR_SIMM9:
    case AARCH64_OPND_ADDR_SIMM9_2:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.writeback)
	{
	  if (opnd->addr.preind)
	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
	}
      else
	{
	  if (opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
	  else
	    snprintf (buf, size, "[%s]", name);
	}
      break;

    case AARCH64_OPND_ADDR_UIMM12:
      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
      if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", name);
      break;

    case AARCH64_OPND_SYSREG:
      for (i = 0; aarch64_sys_regs[i].name; ++i)
	if (aarch64_sys_regs[i].value == opnd->sysreg
	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
	  break;
      if (aarch64_sys_regs[i].name)
	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
      else
	{
	  /* Implementation defined system register.  */
	  unsigned int value = opnd->sysreg;
	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
		    value & 0x7);
	}
      break;

    case AARCH64_OPND_PSTATEFIELD:
      for (i = 0; aarch64_pstatefields[i].name; ++i)
	if (aarch64_pstatefields[i].value == opnd->pstatefield)
	  break;
      assert (aarch64_pstatefields[i].name);
      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
      break;

    case AARCH64_OPND_SYSREG_AT:
    case AARCH64_OPND_SYSREG_DC:
    case AARCH64_OPND_SYSREG_IC:
    case AARCH64_OPND_SYSREG_TLBI:
      snprintf (buf, size, "%s", opnd->sysins_op->name);
      break;

    case AARCH64_OPND_BARRIER:
      snprintf (buf, size, "%s", opnd->barrier->name);
      break;

    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in DCPS1.  */
      if (! optional_operand_p (opcode, idx)
	  || (opnd->barrier->value
	      != get_optional_operand_default_value (opcode)))
	snprintf (buf, size, "#0x%x", opnd->barrier->value);
      break;

    case AARCH64_OPND_PRFOP:
      if (opnd->prfop->name != NULL)
	snprintf (buf, size, "%s", opnd->prfop->name);
      else
	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
      break;

    case AARCH64_OPND_BARRIER_PSB:
      snprintf (buf, size, "%s", opnd->hint_option->name);
      break;

    default:
      assert (0);
    }
}
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

#define C0   0
#define C1   1
#define C2   2
#define C3   3
#define C4   4
#define C5   5
#define C6   6
#define C7   7
#define C8   8
#define C9   9
#define C10 10
#define C11 11
#define C12 12
#define C13 13
#define C14 14
#define C15 15
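/* As a worked example of the packing, CPENC(3,0,C0,C0,0), the encoding of
   midr_el1 below, evaluates to ((3 << 19) >> 5) == 0xc000, which places
   op0:op1:CRn:CRm:op2 in bits 15..14, 13..11, 10..7, 6..3 and 2..0, the
   same layout decoded by the SYSREG printing code above.  */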
#define F_DEPRECATED	0x1	/* Deprecated system register.  */
#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
#define F_HASXT		0x4	/* System instruction register <Xt>
				   operand.  */

/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle cpu-implementation-defined system registers.  */
const aarch64_sys_reg aarch64_sys_regs [] =
{
  { "spsr_el1",       CPEN_(0,C0,0),  0 }, /* = spsr_svc */
  { "spsr_el12",      CPEN_ (5, C0, 0), F_ARCHEXT },
  { "elr_el1",        CPEN_(0,C0,1),  0 },
  { "elr_el12",       CPEN_ (5, C0, 1), F_ARCHEXT },
  { "sp_el0",         CPEN_(0,C1,0),  0 },
  { "spsel",          CPEN_(0,C2,0),  0 },
  { "daif",           CPEN_(3,C2,1),  0 },
  { "currentel",      CPEN_(0,C2,2),  0 }, /* RO */
  { "pan",            CPEN_(0,C2,3),  F_ARCHEXT },
  { "uao",            CPEN_ (0, C2, 4), F_ARCHEXT },
  { "nzcv",           CPEN_(3,C2,0),  0 },
  { "fpcr",           CPEN_(3,C4,0),  0 },
  { "fpsr",           CPEN_(3,C4,1),  0 },
  { "dspsr_el0",      CPEN_(3,C5,0),  0 },
  { "dlr_el0",        CPEN_(3,C5,1),  0 },
  { "spsr_el2",       CPEN_(4,C0,0),  0 }, /* = spsr_hyp */
  { "elr_el2",        CPEN_(4,C0,1),  0 },
  { "sp_el1",         CPEN_(4,C1,0),  0 },
  { "spsr_irq",       CPEN_(4,C3,0),  0 },
  { "spsr_abt",       CPEN_(4,C3,1),  0 },
  { "spsr_und",       CPEN_(4,C3,2),  0 },
  { "spsr_fiq",       CPEN_(4,C3,3),  0 },
  { "spsr_el3",       CPEN_(6,C0,0),  0 },
  { "elr_el3",        CPEN_(6,C0,1),  0 },
  { "sp_el2",         CPEN_(6,C1,0),  0 },
  { "spsr_svc",       CPEN_(0,C0,0),  F_DEPRECATED }, /* = spsr_el1 */
  { "spsr_hyp",       CPEN_(4,C0,0),  F_DEPRECATED }, /* = spsr_el2 */
  { "midr_el1",       CPENC(3,0,C0,C0,0),  0 }, /* RO */
  { "ctr_el0",        CPENC(3,3,C0,C0,1),  0 }, /* RO */
  { "mpidr_el1",      CPENC(3,0,C0,C0,5),  0 }, /* RO */
  { "revidr_el1",     CPENC(3,0,C0,C0,6),  0 }, /* RO */
  { "aidr_el1",       CPENC(3,1,C0,C0,7),  0 }, /* RO */
  { "dczid_el0",      CPENC(3,3,C0,C0,7),  0 }, /* RO */
  { "id_dfr0_el1",    CPENC(3,0,C0,C1,2),  0 }, /* RO */
  { "id_pfr0_el1",    CPENC(3,0,C0,C1,0),  0 }, /* RO */
  { "id_pfr1_el1",    CPENC(3,0,C0,C1,1),  0 }, /* RO */
  { "id_afr0_el1",    CPENC(3,0,C0,C1,3),  0 }, /* RO */
  { "id_mmfr0_el1",   CPENC(3,0,C0,C1,4),  0 }, /* RO */
  { "id_mmfr1_el1",   CPENC(3,0,C0,C1,5),  0 }, /* RO */
  { "id_mmfr2_el1",   CPENC(3,0,C0,C1,6),  0 }, /* RO */
  { "id_mmfr3_el1",   CPENC(3,0,C0,C1,7),  0 }, /* RO */
  { "id_mmfr4_el1",   CPENC(3,0,C0,C2,6),  0 }, /* RO */
  { "id_isar0_el1",   CPENC(3,0,C0,C2,0),  0 }, /* RO */
  { "id_isar1_el1",   CPENC(3,0,C0,C2,1),  0 }, /* RO */
  { "id_isar2_el1",   CPENC(3,0,C0,C2,2),  0 }, /* RO */
  { "id_isar3_el1",   CPENC(3,0,C0,C2,3),  0 }, /* RO */
  { "id_isar4_el1",   CPENC(3,0,C0,C2,4),  0 }, /* RO */
  { "id_isar5_el1",   CPENC(3,0,C0,C2,5),  0 }, /* RO */
  { "mvfr0_el1",      CPENC(3,0,C0,C3,0),  0 }, /* RO */
  { "mvfr1_el1",      CPENC(3,0,C0,C3,1),  0 }, /* RO */
  { "mvfr2_el1",      CPENC(3,0,C0,C3,2),  0 }, /* RO */
  { "ccsidr_el1",     CPENC(3,1,C0,C0,0),  0 }, /* RO */
  { "id_aa64pfr0_el1",   CPENC(3,0,C0,C4,0),  0 }, /* RO */
  { "id_aa64pfr1_el1",   CPENC(3,0,C0,C4,1),  0 }, /* RO */
  { "id_aa64dfr0_el1",   CPENC(3,0,C0,C5,0),  0 }, /* RO */
  { "id_aa64dfr1_el1",   CPENC(3,0,C0,C5,1),  0 }, /* RO */
  { "id_aa64isar0_el1",  CPENC(3,0,C0,C6,0),  0 }, /* RO */
  { "id_aa64isar1_el1",  CPENC(3,0,C0,C6,1),  0 }, /* RO */
  { "id_aa64mmfr0_el1",  CPENC(3,0,C0,C7,0),  0 }, /* RO */
  { "id_aa64mmfr1_el1",  CPENC(3,0,C0,C7,1),  0 }, /* RO */
  { "id_aa64mmfr2_el1",  CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
  { "id_aa64afr0_el1",   CPENC(3,0,C0,C5,4),  0 }, /* RO */
  { "id_aa64afr1_el1",   CPENC(3,0,C0,C5,5),  0 }, /* RO */
  { "clidr_el1",      CPENC(3,1,C0,C0,1),  0 }, /* RO */
  { "csselr_el1",     CPENC(3,2,C0,C0,0),  0 }, /* RO */
  { "vpidr_el2",      CPENC(3,4,C0,C0,0),  0 },
  { "vmpidr_el2",     CPENC(3,4,C0,C0,5),  0 },
  { "sctlr_el1",      CPENC(3,0,C1,C0,0),  0 },
  { "sctlr_el2",      CPENC(3,4,C1,C0,0),  0 },
  { "sctlr_el3",      CPENC(3,6,C1,C0,0),  0 },
  { "sctlr_el12",     CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
  { "actlr_el1",      CPENC(3,0,C1,C0,1),  0 },
  { "actlr_el2",      CPENC(3,4,C1,C0,1),  0 },
  { "actlr_el3",      CPENC(3,6,C1,C0,1),  0 },
  { "cpacr_el1",      CPENC(3,0,C1,C0,2),  0 },
  { "cpacr_el12",     CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
  { "cptr_el2",       CPENC(3,4,C1,C1,2),  0 },
  { "cptr_el3",       CPENC(3,6,C1,C1,2),  0 },
  { "scr_el3",        CPENC(3,6,C1,C1,0),  0 },
  { "hcr_el2",        CPENC(3,4,C1,C1,0),  0 },
  { "mdcr_el2",       CPENC(3,4,C1,C1,1),  0 },
  { "mdcr_el3",       CPENC(3,6,C1,C3,1),  0 },
  { "hstr_el2",       CPENC(3,4,C1,C1,3),  0 },
  { "hacr_el2",       CPENC(3,4,C1,C1,7),  0 },
  { "ttbr0_el1",      CPENC(3,0,C2,C0,0),  0 },
  { "ttbr1_el1",      CPENC(3,0,C2,C0,1),  0 },
  { "ttbr0_el2",      CPENC(3,4,C2,C0,0),  0 },
  { "ttbr1_el2",      CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
  { "ttbr0_el3",      CPENC(3,6,C2,C0,0),  0 },
  { "ttbr0_el12",     CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
  { "ttbr1_el12",     CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
  { "vttbr_el2",      CPENC(3,4,C2,C1,0),  0 },
  { "tcr_el1",        CPENC(3,0,C2,C0,2),  0 },
  { "tcr_el2",        CPENC(3,4,C2,C0,2),  0 },
  { "tcr_el3",        CPENC(3,6,C2,C0,2),  0 },
  { "tcr_el12",       CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
  { "vtcr_el2",       CPENC(3,4,C2,C1,2),  0 },
  { "afsr0_el1",      CPENC(3,0,C5,C1,0),  0 },
  { "afsr1_el1",      CPENC(3,0,C5,C1,1),  0 },
  { "afsr0_el2",      CPENC(3,4,C5,C1,0),  0 },
  { "afsr1_el2",      CPENC(3,4,C5,C1,1),  0 },
  { "afsr0_el3",      CPENC(3,6,C5,C1,0),  0 },
  { "afsr0_el12",     CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
  { "afsr1_el3",      CPENC(3,6,C5,C1,1),  0 },
  { "afsr1_el12",     CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
  { "esr_el1",        CPENC(3,0,C5,C2,0),  0 },
  { "esr_el2",        CPENC(3,4,C5,C2,0),  0 },
  { "esr_el3",        CPENC(3,6,C5,C2,0),  0 },
  { "esr_el12",       CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
  { "vsesr_el2",      CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
  { "fpexc32_el2",    CPENC(3,4,C5,C3,0),  0 },
  { "erridr_el1",     CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
  { "errselr_el1",    CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
  { "erxfr_el1",      CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
  { "erxctlr_el1",    CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
  { "erxstatus_el1",  CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
  { "erxaddr_el1",    CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
  { "erxmisc0_el1",   CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
  { "erxmisc1_el1",   CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
  { "far_el1",        CPENC(3,0,C6,C0,0),  0 },
  { "far_el2",        CPENC(3,4,C6,C0,0),  0 },
  { "far_el3",        CPENC(3,6,C6,C0,0),  0 },
  { "far_el12",       CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
  { "hpfar_el2",      CPENC(3,4,C6,C0,4),  0 },
  { "par_el1",        CPENC(3,0,C7,C4,0),  0 },
  { "mair_el1",       CPENC(3,0,C10,C2,0), 0 },
  { "mair_el2",       CPENC(3,4,C10,C2,0), 0 },
  { "mair_el3",       CPENC(3,6,C10,C2,0), 0 },
  { "mair_el12",      CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
  { "amair_el1",      CPENC(3,0,C10,C3,0), 0 },
  { "amair_el2",      CPENC(3,4,C10,C3,0), 0 },
  { "amair_el3",      CPENC(3,6,C10,C3,0), 0 },
  { "amair_el12",     CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
  { "vbar_el1",       CPENC(3,0,C12,C0,0), 0 },
  { "vbar_el2",       CPENC(3,4,C12,C0,0), 0 },
  { "vbar_el3",       CPENC(3,6,C12,C0,0), 0 },
  { "vbar_el12",      CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
  { "rvbar_el1",      CPENC(3,0,C12,C0,1), 0 }, /* RO */
  { "rvbar_el2",      CPENC(3,4,C12,C0,1), 0 }, /* RO */
  { "rvbar_el3",      CPENC(3,6,C12,C0,1), 0 }, /* RO */
  { "rmr_el1",        CPENC(3,0,C12,C0,2), 0 },
  { "rmr_el2",        CPENC(3,4,C12,C0,2), 0 },
  { "rmr_el3",        CPENC(3,6,C12,C0,2), 0 },
  { "isr_el1",        CPENC(3,0,C12,C1,0), 0 }, /* RO */
  { "disr_el1",       CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
  { "vdisr_el2",      CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
  { "contextidr_el1", CPENC(3,0,C13,C0,1), 0 },
  { "contextidr_el2", CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
  { "contextidr_el12",CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
  { "tpidr_el0",      CPENC(3,3,C13,C0,2), 0 },
  { "tpidrro_el0",    CPENC(3,3,C13,C0,3), 0 }, /* RO */
  { "tpidr_el1",      CPENC(3,0,C13,C0,4), 0 },
  { "tpidr_el2",      CPENC(3,4,C13,C0,2), 0 },
  { "tpidr_el3",      CPENC(3,6,C13,C0,2), 0 },
  { "teecr32_el1",    CPENC(2,2,C0,C0,0),  0 }, /* See section 3.9.7.1 */
  { "cntfrq_el0",     CPENC(3,3,C14,C0,0), 0 }, /* RO */
  { "cntpct_el0",     CPENC(3,3,C14,C0,1), 0 }, /* RO */
  { "cntvct_el0",     CPENC(3,3,C14,C0,2), 0 }, /* RO */
  { "cntvoff_el2",    CPENC(3,4,C14,C0,3), 0 },
  { "cntkctl_el1",    CPENC(3,0,C14,C1,0), 0 },
  { "cntkctl_el12",   CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
  { "cnthctl_el2",    CPENC(3,4,C14,C1,0), 0 },
  { "cntp_tval_el0",  CPENC(3,3,C14,C2,0), 0 },
  { "cntp_tval_el02", CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
  { "cntp_ctl_el0",   CPENC(3,3,C14,C2,1), 0 },
  { "cntp_ctl_el02",  CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
  { "cntp_cval_el0",  CPENC(3,3,C14,C2,2), 0 },
  { "cntp_cval_el02", CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
  { "cntv_tval_el0",  CPENC(3,3,C14,C3,0), 0 },
  { "cntv_tval_el02", CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
  { "cntv_ctl_el0",   CPENC(3,3,C14,C3,1), 0 },
  { "cntv_ctl_el02",  CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
  { "cntv_cval_el0",  CPENC(3,3,C14,C3,2), 0 },
  { "cntv_cval_el02", CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
  { "cnthp_tval_el2", CPENC(3,4,C14,C2,0), 0 },
  { "cnthp_ctl_el2",  CPENC(3,4,C14,C2,1), 0 },
  { "cnthp_cval_el2", CPENC(3,4,C14,C2,2), 0 },
  { "cntps_tval_el1", CPENC(3,7,C14,C2,0), 0 },
  { "cntps_ctl_el1",  CPENC(3,7,C14,C2,1), 0 },
  { "cntps_cval_el1", CPENC(3,7,C14,C2,2), 0 },
  { "cnthv_tval_el2", CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
  { "cnthv_ctl_el2",  CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
  { "cnthv_cval_el2", CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
  { "dacr32_el2",     CPENC(3,4,C3,C0,0),  0 },
  { "ifsr32_el2",     CPENC(3,4,C5,C0,1),  0 },
  { "teehbr32_el1",   CPENC(2,2,C1,C0,0),  0 },
  { "sder32_el3",     CPENC(3,6,C1,C1,1),  0 },
  { "mdscr_el1",      CPENC(2,0,C0,C2,2),  0 },
  { "mdccsr_el0",     CPENC(2,3,C0,C1,0),  0 }, /* r */
  { "mdccint_el1",    CPENC(2,0,C0,C2,0),  0 },
  { "dbgdtr_el0",     CPENC(2,3,C0,C4,0),  0 },
  { "dbgdtrrx_el0",   CPENC(2,3,C0,C5,0),  0 }, /* r */
  { "dbgdtrtx_el0",   CPENC(2,3,C0,C5,0),  0 }, /* w */
  { "osdtrrx_el1",    CPENC(2,0,C0,C0,2),  0 }, /* r */
  { "osdtrtx_el1",    CPENC(2,0,C0,C3,2),  0 }, /* w */
  { "oseccr_el1",     CPENC(2,0,C0,C6,2),  0 },
  { "dbgvcr32_el2",   CPENC(2,4,C0,C7,0),  0 },
  { "dbgbvr0_el1",    CPENC(2,0,C0,C0,4),  0 },
  { "dbgbvr1_el1",    CPENC(2,0,C0,C1,4),  0 },
  { "dbgbvr2_el1",    CPENC(2,0,C0,C2,4),  0 },
  { "dbgbvr3_el1",    CPENC(2,0,C0,C3,4),  0 },
  { "dbgbvr4_el1",    CPENC(2,0,C0,C4,4),  0 },
  { "dbgbvr5_el1",    CPENC(2,0,C0,C5,4),  0 },
  { "dbgbvr6_el1",    CPENC(2,0,C0,C6,4),  0 },
  { "dbgbvr7_el1",    CPENC(2,0,C0,C7,4),  0 },
  { "dbgbvr8_el1",    CPENC(2,0,C0,C8,4),  0 },
  { "dbgbvr9_el1",    CPENC(2,0,C0,C9,4),  0 },
  { "dbgbvr10_el1",   CPENC(2,0,C0,C10,4), 0 },
  { "dbgbvr11_el1",   CPENC(2,0,C0,C11,4), 0 },
  { "dbgbvr12_el1",   CPENC(2,0,C0,C12,4), 0 },
  { "dbgbvr13_el1",   CPENC(2,0,C0,C13,4), 0 },
  { "dbgbvr14_el1",   CPENC(2,0,C0,C14,4), 0 },
  { "dbgbvr15_el1",   CPENC(2,0,C0,C15,4), 0 },
  { "dbgbcr0_el1",    CPENC(2,0,C0,C0,5),  0 },
  { "dbgbcr1_el1",    CPENC(2,0,C0,C1,5),  0 },
  { "dbgbcr2_el1",    CPENC(2,0,C0,C2,5),  0 },
  { "dbgbcr3_el1",    CPENC(2,0,C0,C3,5),  0 },
  { "dbgbcr4_el1",    CPENC(2,0,C0,C4,5),  0 },
  { "dbgbcr5_el1",    CPENC(2,0,C0,C5,5),  0 },
  { "dbgbcr6_el1",    CPENC(2,0,C0,C6,5),  0 },
  { "dbgbcr7_el1",    CPENC(2,0,C0,C7,5),  0 },
  { "dbgbcr8_el1",    CPENC(2,0,C0,C8,5),  0 },
  { "dbgbcr9_el1",    CPENC(2,0,C0,C9,5),  0 },
  { "dbgbcr10_el1",   CPENC(2,0,C0,C10,5), 0 },
  { "dbgbcr11_el1",   CPENC(2,0,C0,C11,5), 0 },
  { "dbgbcr12_el1",   CPENC(2,0,C0,C12,5), 0 },
  { "dbgbcr13_el1",   CPENC(2,0,C0,C13,5), 0 },
  { "dbgbcr14_el1",   CPENC(2,0,C0,C14,5), 0 },
  { "dbgbcr15_el1",   CPENC(2,0,C0,C15,5), 0 },
  { "dbgwvr0_el1",    CPENC(2,0,C0,C0,6),  0 },
  { "dbgwvr1_el1",    CPENC(2,0,C0,C1,6),  0 },
  { "dbgwvr2_el1",    CPENC(2,0,C0,C2,6),  0 },
  { "dbgwvr3_el1",    CPENC(2,0,C0,C3,6),  0 },
  { "dbgwvr4_el1",    CPENC(2,0,C0,C4,6),  0 },
  { "dbgwvr5_el1",    CPENC(2,0,C0,C5,6),  0 },
  { "dbgwvr6_el1",    CPENC(2,0,C0,C6,6),  0 },
  { "dbgwvr7_el1",    CPENC(2,0,C0,C7,6),  0 },
  { "dbgwvr8_el1",    CPENC(2,0,C0,C8,6),  0 },
  { "dbgwvr9_el1",    CPENC(2,0,C0,C9,6),  0 },
  { "dbgwvr10_el1",   CPENC(2,0,C0,C10,6), 0 },
  { "dbgwvr11_el1",   CPENC(2,0,C0,C11,6), 0 },
  { "dbgwvr12_el1",   CPENC(2,0,C0,C12,6), 0 },
  { "dbgwvr13_el1",   CPENC(2,0,C0,C13,6), 0 },
  { "dbgwvr14_el1",   CPENC(2,0,C0,C14,6), 0 },
  { "dbgwvr15_el1",   CPENC(2,0,C0,C15,6), 0 },
  { "dbgwcr0_el1",    CPENC(2,0,C0,C0,7),  0 },
  { "dbgwcr1_el1",    CPENC(2,0,C0,C1,7),  0 },
  { "dbgwcr2_el1",    CPENC(2,0,C0,C2,7),  0 },
  { "dbgwcr3_el1",    CPENC(2,0,C0,C3,7),  0 },
  { "dbgwcr4_el1",    CPENC(2,0,C0,C4,7),  0 },
  { "dbgwcr5_el1",    CPENC(2,0,C0,C5,7),  0 },
  { "dbgwcr6_el1",    CPENC(2,0,C0,C6,7),  0 },
  { "dbgwcr7_el1",    CPENC(2,0,C0,C7,7),  0 },
  { "dbgwcr8_el1",    CPENC(2,0,C0,C8,7),  0 },
  { "dbgwcr9_el1",    CPENC(2,0,C0,C9,7),  0 },
  { "dbgwcr10_el1",   CPENC(2,0,C0,C10,7), 0 },
  { "dbgwcr11_el1",   CPENC(2,0,C0,C11,7), 0 },
  { "dbgwcr12_el1",   CPENC(2,0,C0,C12,7), 0 },
  { "dbgwcr13_el1",   CPENC(2,0,C0,C13,7), 0 },
  { "dbgwcr14_el1",   CPENC(2,0,C0,C14,7), 0 },
  { "dbgwcr15_el1",   CPENC(2,0,C0,C15,7), 0 },
  { "mdrar_el1",      CPENC(2,0,C1,C0,0),  0 }, /* r */
  { "oslar_el1",      CPENC(2,0,C1,C0,4),  0 }, /* w */
  { "oslsr_el1",      CPENC(2,0,C1,C1,4),  0 }, /* r */
  { "osdlr_el1",      CPENC(2,0,C1,C3,4),  0 },
  { "dbgprcr_el1",    CPENC(2,0,C1,C4,4),  0 },
  { "dbgclaimset_el1",   CPENC(2,0,C7,C8,6),  0 },
  { "dbgclaimclr_el1",   CPENC(2,0,C7,C9,6),  0 },
  { "dbgauthstatus_el1", CPENC(2,0,C7,C14,6), 0 }, /* r */
  { "pmblimitr_el1",  CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
  { "pmbptr_el1",     CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
  { "pmbsr_el1",      CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
  { "pmbidr_el1",     CPENC (3, 0, C9, C10, 7), F_ARCHEXT },  /* ro */
  { "pmscr_el1",      CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmsicr_el1",     CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
  { "pmsirr_el1",     CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
  { "pmsfcr_el1",     CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
  { "pmsevfr_el1",    CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
  { "pmslatfr_el1",   CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
  { "pmsidr_el1",     CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* ro */
  { "pmscr_el2",      CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmscr_el12",     CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
  { "pmcr_el0",       CPENC(3,3,C9,C12,0),  0 },
  { "pmcntenset_el0", CPENC(3,3,C9,C12,1),  0 },
  { "pmcntenclr_el0", CPENC(3,3,C9,C12,2),  0 },
  { "pmovsclr_el0",   CPENC(3,3,C9,C12,3),  0 },
  { "pmswinc_el0",    CPENC(3,3,C9,C12,4),  0 }, /* w */
  { "pmselr_el0",     CPENC(3,3,C9,C12,5),  0 },
  { "pmceid0_el0",    CPENC(3,3,C9,C12,6),  0 }, /* r */
  { "pmceid1_el0",    CPENC(3,3,C9,C12,7),  0 }, /* r */
  { "pmccntr_el0",    CPENC(3,3,C9,C13,0),  0 },
  { "pmxevtyper_el0", CPENC(3,3,C9,C13,1),  0 },
  { "pmxevcntr_el0",  CPENC(3,3,C9,C13,2),  0 },
  { "pmuserenr_el0",  CPENC(3,3,C9,C14,0),  0 },
  { "pmintenset_el1", CPENC(3,0,C9,C14,1),  0 },
  { "pmintenclr_el1", CPENC(3,0,C9,C14,2),  0 },
  { "pmovsset_el0",   CPENC(3,3,C9,C14,3),  0 },
  { "pmevcntr0_el0",  CPENC(3,3,C14,C8,0),  0 },
  { "pmevcntr1_el0",  CPENC(3,3,C14,C8,1),  0 },
  { "pmevcntr2_el0",  CPENC(3,3,C14,C8,2),  0 },
  { "pmevcntr3_el0",  CPENC(3,3,C14,C8,3),  0 },
  { "pmevcntr4_el0",  CPENC(3,3,C14,C8,4),  0 },
  { "pmevcntr5_el0",  CPENC(3,3,C14,C8,5),  0 },
  { "pmevcntr6_el0",  CPENC(3,3,C14,C8,6),  0 },
  { "pmevcntr7_el0",  CPENC(3,3,C14,C8,7),  0 },
  { "pmevcntr8_el0",  CPENC(3,3,C14,C9,0),  0 },
  { "pmevcntr9_el0",  CPENC(3,3,C14,C9,1),  0 },
  { "pmevcntr10_el0", CPENC(3,3,C14,C9,2),  0 },
  { "pmevcntr11_el0", CPENC(3,3,C14,C9,3),  0 },
  { "pmevcntr12_el0", CPENC(3,3,C14,C9,4),  0 },
  { "pmevcntr13_el0", CPENC(3,3,C14,C9,5),  0 },
  { "pmevcntr14_el0", CPENC(3,3,C14,C9,6),  0 },
  { "pmevcntr15_el0", CPENC(3,3,C14,C9,7),  0 },
  { "pmevcntr16_el0", CPENC(3,3,C14,C10,0), 0 },
  { "pmevcntr17_el0", CPENC(3,3,C14,C10,1), 0 },
  { "pmevcntr18_el0", CPENC(3,3,C14,C10,2), 0 },
  { "pmevcntr19_el0", CPENC(3,3,C14,C10,3), 0 },
  { "pmevcntr20_el0", CPENC(3,3,C14,C10,4), 0 },
  { "pmevcntr21_el0", CPENC(3,3,C14,C10,5), 0 },
  { "pmevcntr22_el0", CPENC(3,3,C14,C10,6), 0 },
  { "pmevcntr23_el0", CPENC(3,3,C14,C10,7), 0 },
  { "pmevcntr24_el0", CPENC(3,3,C14,C11,0), 0 },
  { "pmevcntr25_el0", CPENC(3,3,C14,C11,1), 0 },
  { "pmevcntr26_el0", CPENC(3,3,C14,C11,2), 0 },
  { "pmevcntr27_el0", CPENC(3,3,C14,C11,3), 0 },
  { "pmevcntr28_el0", CPENC(3,3,C14,C11,4), 0 },
  { "pmevcntr29_el0", CPENC(3,3,C14,C11,5), 0 },
  { "pmevcntr30_el0", CPENC(3,3,C14,C11,6), 0 },
  { "pmevtyper0_el0", CPENC(3,3,C14,C12,0), 0 },
  { "pmevtyper1_el0", CPENC(3,3,C14,C12,1), 0 },
  { "pmevtyper2_el0", CPENC(3,3,C14,C12,2), 0 },
  { "pmevtyper3_el0", CPENC(3,3,C14,C12,3), 0 },
  { "pmevtyper4_el0", CPENC(3,3,C14,C12,4), 0 },
  { "pmevtyper5_el0", CPENC(3,3,C14,C12,5), 0 },
  { "pmevtyper6_el0", CPENC(3,3,C14,C12,6), 0 },
  { "pmevtyper7_el0", CPENC(3,3,C14,C12,7), 0 },
  { "pmevtyper8_el0", CPENC(3,3,C14,C13,0), 0 },
  { "pmevtyper9_el0", CPENC(3,3,C14,C13,1), 0 },
  { "pmevtyper10_el0", CPENC(3,3,C14,C13,2), 0 },
  { "pmevtyper11_el0", CPENC(3,3,C14,C13,3), 0 },
  { "pmevtyper12_el0", CPENC(3,3,C14,C13,4), 0 },
  { "pmevtyper13_el0", CPENC(3,3,C14,C13,5), 0 },
  { "pmevtyper14_el0", CPENC(3,3,C14,C13,6), 0 },
  { "pmevtyper15_el0", CPENC(3,3,C14,C13,7), 0 },
  { "pmevtyper16_el0", CPENC(3,3,C14,C14,0), 0 },
  { "pmevtyper17_el0", CPENC(3,3,C14,C14,1), 0 },
  { "pmevtyper18_el0", CPENC(3,3,C14,C14,2), 0 },
  { "pmevtyper19_el0", CPENC(3,3,C14,C14,3), 0 },
  { "pmevtyper20_el0", CPENC(3,3,C14,C14,4), 0 },
  { "pmevtyper21_el0", CPENC(3,3,C14,C14,5), 0 },
  { "pmevtyper22_el0", CPENC(3,3,C14,C14,6), 0 },
  { "pmevtyper23_el0", CPENC(3,3,C14,C14,7), 0 },
  { "pmevtyper24_el0", CPENC(3,3,C14,C15,0), 0 },
  { "pmevtyper25_el0", CPENC(3,3,C14,C15,1), 0 },
  { "pmevtyper26_el0", CPENC(3,3,C14,C15,2), 0 },
  { "pmevtyper27_el0", CPENC(3,3,C14,C15,3), 0 },
  { "pmevtyper28_el0", CPENC(3,3,C14,C15,4), 0 },
  { "pmevtyper29_el0", CPENC(3,3,C14,C15,5), 0 },
  { "pmevtyper30_el0", CPENC(3,3,C14,C15,6), 0 },
  { "pmccfiltr_el0",   CPENC(3,3,C14,C15,7), 0 },
  { 0, CPENC(0,0,0,0,0), 0 },
};
bfd_boolean
aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
{
  return (reg->flags & F_DEPRECATED) != 0;
}
bfd_boolean
aarch64_sys_reg_supported_p (const aarch64_feature_set features,
			     const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_sys_regs.  */
  if (reg->value == CPEN_(0,C2,3)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* Virtualization host extensions: system registers.  */
  if ((reg->value == CPENC (3, 4, C2, C0, 1)
       || reg->value == CPENC (3, 4, C13, C0, 1)
       || reg->value == CPENC (3, 4, C14, C3, 0)
       || reg->value == CPENC (3, 4, C14, C3, 1)
       || reg->value == CPENC (3, 4, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
  if ((reg->value == CPEN_ (5, C0, 0)
       || reg->value == CPEN_ (5, C0, 1)
       || reg->value == CPENC (3, 5, C1, C0, 0)
       || reg->value == CPENC (3, 5, C1, C0, 2)
       || reg->value == CPENC (3, 5, C2, C0, 0)
       || reg->value == CPENC (3, 5, C2, C0, 1)
       || reg->value == CPENC (3, 5, C2, C0, 2)
       || reg->value == CPENC (3, 5, C5, C1, 0)
       || reg->value == CPENC (3, 5, C5, C1, 1)
       || reg->value == CPENC (3, 5, C5, C2, 0)
       || reg->value == CPENC (3, 5, C6, C0, 0)
       || reg->value == CPENC (3, 5, C10, C2, 0)
       || reg->value == CPENC (3, 5, C10, C3, 0)
       || reg->value == CPENC (3, 5, C12, C0, 0)
       || reg->value == CPENC (3, 5, C13, C0, 1)
       || reg->value == CPENC (3, 5, C14, C1, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
  if ((reg->value == CPENC (3, 5, C14, C2, 0)
       || reg->value == CPENC (3, 5, C14, C2, 1)
       || reg->value == CPENC (3, 5, C14, C2, 2)
       || reg->value == CPENC (3, 5, C14, C3, 0)
       || reg->value == CPENC (3, 5, C14, C3, 1)
       || reg->value == CPENC (3, 5, C14, C3, 2))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;

  /* ARMv8.2 features.  */

  /* ID_AA64MMFR2_EL1.  */
  if (reg->value == CPENC (3, 0, C0, C7, 2)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  if (reg->value == CPEN_ (0, C2, 4)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* RAS extension.  */

  /* ERRIDR_EL1 and ERRSELR_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1, ERXADDR_EL1, ERXMISC0_EL1 and
     ERXMISC1_EL1.  */
  if ((reg->value == CPENC (3, 0, C5, C3, 0)
       || reg->value == CPENC (3, 0, C5, C3, 1)
       || reg->value == CPENC (3, 0, C5, C3, 2)
       || reg->value == CPENC (3, 0, C5, C3, 3)
       || reg->value == CPENC (3, 0, C5, C5, 0)
       || reg->value == CPENC (3, 0, C5, C5, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
  if ((reg->value == CPENC (3, 4, C5, C2, 3)
       || reg->value == CPENC (3, 0, C12, C1, 1)
       || reg->value == CPENC (3, 4, C12, C1, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
    return FALSE;

  /* Statistical Profiling extension.  */
  if ((reg->value == CPENC (3, 0, C9, C10, 0)
       || reg->value == CPENC (3, 0, C9, C10, 1)
       || reg->value == CPENC (3, 0, C9, C10, 3)
       || reg->value == CPENC (3, 0, C9, C10, 7)
       || reg->value == CPENC (3, 0, C9, C9, 0)
       || reg->value == CPENC (3, 0, C9, C9, 2)
       || reg->value == CPENC (3, 0, C9, C9, 3)
       || reg->value == CPENC (3, 0, C9, C9, 4)
       || reg->value == CPENC (3, 0, C9, C9, 5)
       || reg->value == CPENC (3, 0, C9, C9, 6)
       || reg->value == CPENC (3, 0, C9, C9, 7)
       || reg->value == CPENC (3, 4, C9, C9, 0)
       || reg->value == CPENC (3, 5, C9, C9, 0))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
    return FALSE;

  return TRUE;
}
const aarch64_sys_reg aarch64_pstatefields [] =
{
  { "spsel",   0x05, 0 },
  { "daifset", 0x1e, 0 },
  { "daifclr", 0x1f, 0 },
  { "pan",     0x04, F_ARCHEXT },
  { "uao",     0x03, F_ARCHEXT },
  { 0, CPENC(0,0,0,0,0), 0 },
};
bfd_boolean
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* PAN.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x04
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
    return FALSE;

  /* UAO.  Values are from aarch64_pstatefields.  */
  if (reg->value == 0x03
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",   CPENS (3, C7, C4, 1),  F_HASXT },
    { "ivac",  CPENS (0, C7, C6, 1),  F_HASXT },
    { "isw",   CPENS (0, C7, C6, 2),  F_HASXT },
    { "cvac",  CPENS (3, C7, C10, 1), F_HASXT },
    { "csw",   CPENS (0, C7, C10, 2), F_HASXT },
    { "cvau",  CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",  CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "civac", CPENS (3, C7, C14, 1), F_HASXT },
    { "cisw",  CPENS (0, C7, C14, 2), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi [] =
{
    { "vmalle1",      CPENS(0,C8,C7,0), 0 },
    { "vae1",         CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",       CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",        CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is",    CPENS(0,C8,C3,0), 0 },
    { "vae1is",       CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",     CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",      CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is",    CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",   CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",      CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",     CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",         CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",       CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",   CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is", CPENS(4,C8,C3,6), 0 },
    { "vae3",         CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",       CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",        CPENS(4,C8,C7,0), 0 },
    { "alle2is",      CPENS(4,C8,C3,0), 0 },
    { "alle1",        CPENS(4,C8,C7,4), 0 },
    { "alle1is",      CPENS(4,C8,C3,4), 0 },
    { "alle3",        CPENS(6,C8,C7,0), 0 },
    { "alle3is",      CPENS(6,C8,C3,0), 0 },
    { "vale1is",      CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",      CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",      CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",     CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",        CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",        CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",        CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",       CPENS (0, C8, C7, 7), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};
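/* The F_HASXT flag in the tables above marks operations that take a register
   argument, e.g. "tlbi vae1, x0" or "dc cvac, x1", whereas e.g.
   "tlbi vmalle1" takes none; aarch64_sys_ins_reg_has_xt below simply tests
   that flag.  */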
bfd_boolean
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
  return (sys_ins_reg->flags & F_HASXT) != 0;
}
bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  return TRUE;
}
/* Include the opcode description table as well as the operand description
   table.  */
#include "aarch64-tbl.h"