/* AArch64 assembler/disassembler support.

   Copyright (C) 2009-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the license, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */
#ifndef OPCODE_AARCH64_H
#define OPCODE_AARCH64_H

/* The offset for pc-relative addressing is currently defined to be 0.  */
#define AARCH64_PCREL_OFFSET 0
typedef uint32_t aarch64_insn;
/* The following bitmasks control CPU features.  */
#define AARCH64_FEATURE_V8 (1ULL << 0) /* All processors.  */
#define AARCH64_FEATURE_V8_6 (1ULL << 1) /* ARMv8.6 processors.  */
#define AARCH64_FEATURE_BFLOAT16 (1ULL << 2) /* Bfloat16 insns.  */
#define AARCH64_FEATURE_V8_A (1ULL << 3) /* Armv8-A processors.  */
#define AARCH64_FEATURE_SVE2 (1ULL << 4) /* SVE2 instructions.  */
#define AARCH64_FEATURE_V8_2 (1ULL << 5) /* ARMv8.2 processors.  */
#define AARCH64_FEATURE_V8_3 (1ULL << 6) /* ARMv8.3 processors.  */
#define AARCH64_FEATURE_SVE2_AES (1ULL << 7)
#define AARCH64_FEATURE_SVE2_BITPERM (1ULL << 8)
#define AARCH64_FEATURE_SVE2_SM4 (1ULL << 9)
#define AARCH64_FEATURE_SVE2_SHA3 (1ULL << 10)
#define AARCH64_FEATURE_V8_4 (1ULL << 11) /* ARMv8.4 processors.  */
#define AARCH64_FEATURE_V8_R (1ULL << 12) /* Armv8-R processors.  */
#define AARCH64_FEATURE_V8_7 (1ULL << 13) /* Armv8.7 processors.  */
#define AARCH64_FEATURE_SME (1ULL << 14) /* Scalable Matrix Extension.  */
#define AARCH64_FEATURE_LS64 (1ULL << 15) /* Atomic 64-byte load/store.  */
#define AARCH64_FEATURE_PAC (1ULL << 16) /* v8.3 Pointer Authentication.  */
#define AARCH64_FEATURE_FP (1ULL << 17) /* FP instructions.  */
#define AARCH64_FEATURE_SIMD (1ULL << 18) /* SIMD instructions.  */
#define AARCH64_FEATURE_CRC (1ULL << 19) /* CRC instructions.  */
#define AARCH64_FEATURE_LSE (1ULL << 20) /* LSE instructions.  */
#define AARCH64_FEATURE_PAN (1ULL << 21) /* PAN instructions.  */
#define AARCH64_FEATURE_LOR (1ULL << 22) /* LOR instructions.  */
#define AARCH64_FEATURE_RDMA (1ULL << 23) /* v8.1 SIMD instructions.  */
#define AARCH64_FEATURE_V8_1 (1ULL << 24) /* v8.1 features.  */
#define AARCH64_FEATURE_F16 (1ULL << 25) /* v8.2 FP16 instructions.  */
#define AARCH64_FEATURE_RAS (1ULL << 26) /* RAS Extensions.  */
#define AARCH64_FEATURE_PROFILE (1ULL << 27) /* Statistical Profiling.  */
#define AARCH64_FEATURE_SVE (1ULL << 28) /* SVE instructions.  */
#define AARCH64_FEATURE_RCPC (1ULL << 29) /* RCPC instructions.  */
#define AARCH64_FEATURE_COMPNUM (1ULL << 30) /* Complex # instructions.  */
#define AARCH64_FEATURE_DOTPROD (1ULL << 31) /* Dot Product instructions.  */
#define AARCH64_FEATURE_SM4 (1ULL << 32) /* SM3 & SM4 instructions.  */
#define AARCH64_FEATURE_SHA2 (1ULL << 33) /* SHA2 instructions.  */
#define AARCH64_FEATURE_SHA3 (1ULL << 34) /* SHA3 instructions.  */
#define AARCH64_FEATURE_AES (1ULL << 35) /* AES instructions.  */
#define AARCH64_FEATURE_F16_FML (1ULL << 36) /* v8.2 FP16FML ins.  */
#define AARCH64_FEATURE_V8_5 (1ULL << 37) /* ARMv8.5 processors.  */
#define AARCH64_FEATURE_FLAGMANIP (1ULL << 38) /* v8.5 Flag Manipulation version 2.  */
#define AARCH64_FEATURE_FRINTTS (1ULL << 39) /* FRINT[32,64][Z,X] insns.  */
#define AARCH64_FEATURE_SB (1ULL << 40) /* SB instruction.  */
#define AARCH64_FEATURE_PREDRES (1ULL << 41) /* Execution and Data Prediction Restriction instructions.  */
#define AARCH64_FEATURE_CVADP (1ULL << 42) /* DC CVADP.  */
#define AARCH64_FEATURE_RNG (1ULL << 43) /* Random Number instructions.  */
#define AARCH64_FEATURE_BTI (1ULL << 44) /* BTI instructions.  */
#define AARCH64_FEATURE_SCXTNUM (1ULL << 45) /* SCXTNUM_ELx.  */
#define AARCH64_FEATURE_ID_PFR2 (1ULL << 46) /* ID_PFR2 instructions.  */
#define AARCH64_FEATURE_SSBS (1ULL << 47) /* SSBS mechanism enabled.  */
#define AARCH64_FEATURE_MEMTAG (1ULL << 48) /* Memory Tagging Extension.  */
#define AARCH64_FEATURE_TME (1ULL << 49) /* Transactional Memory Extension.  */
#define AARCH64_FEATURE_MOPS (1ULL << 50) /* Standardization of memory operations.  */
#define AARCH64_FEATURE_HBC (1ULL << 51) /* Hinted conditional branches.  */
#define AARCH64_FEATURE_I8MM (1ULL << 52) /* Matrix Multiply instructions.  */
#define AARCH64_FEATURE_F32MM (1ULL << 53)
#define AARCH64_FEATURE_F64MM (1ULL << 54)
#define AARCH64_FEATURE_FLAGM (1ULL << 55) /* v8.4 Flag Manipulation.  */
#define AARCH64_FEATURE_V9 (1ULL << 56) /* Armv9.0-A processors.  */
#define AARCH64_FEATURE_SME_F64 (1ULL << 57) /* SME F64.  */
#define AARCH64_FEATURE_SME_I64 (1ULL << 58) /* SME I64.  */
#define AARCH64_FEATURE_V8_8 (1ULL << 59) /* Armv8.8 processors.  */
/* Crypto instructions are the combination of AES and SHA2.  */
#define AARCH64_FEATURE_CRYPTO (AARCH64_FEATURE_SHA2 | AARCH64_FEATURE_AES)

#define AARCH64_ARCH_V8_FEATURES (AARCH64_FEATURE_V8_A \
                                  | AARCH64_FEATURE_FP \
                                  | AARCH64_FEATURE_RAS \
                                  | AARCH64_FEATURE_SIMD)
#define AARCH64_ARCH_V8_1_FEATURES (AARCH64_FEATURE_V8_1 \
                                    | AARCH64_FEATURE_CRC \
                                    | AARCH64_FEATURE_LSE \
                                    | AARCH64_FEATURE_PAN \
                                    | AARCH64_FEATURE_LOR \
                                    | AARCH64_FEATURE_RDMA)
#define AARCH64_ARCH_V8_2_FEATURES (AARCH64_FEATURE_V8_2)
#define AARCH64_ARCH_V8_3_FEATURES (AARCH64_FEATURE_V8_3 \
                                    | AARCH64_FEATURE_PAC \
                                    | AARCH64_FEATURE_RCPC \
                                    | AARCH64_FEATURE_COMPNUM)
#define AARCH64_ARCH_V8_4_FEATURES (AARCH64_FEATURE_V8_4 \
                                    | AARCH64_FEATURE_DOTPROD \
                                    | AARCH64_FEATURE_FLAGM \
                                    | AARCH64_FEATURE_F16_FML)
#define AARCH64_ARCH_V8_5_FEATURES (AARCH64_FEATURE_V8_5 \
                                    | AARCH64_FEATURE_FLAGMANIP \
                                    | AARCH64_FEATURE_FRINTTS \
                                    | AARCH64_FEATURE_SB \
                                    | AARCH64_FEATURE_PREDRES \
                                    | AARCH64_FEATURE_CVADP \
                                    | AARCH64_FEATURE_BTI \
                                    | AARCH64_FEATURE_SCXTNUM \
                                    | AARCH64_FEATURE_ID_PFR2 \
                                    | AARCH64_FEATURE_SSBS)
#define AARCH64_ARCH_V8_6_FEATURES (AARCH64_FEATURE_V8_6 \
                                    | AARCH64_FEATURE_BFLOAT16 \
                                    | AARCH64_FEATURE_I8MM)
#define AARCH64_ARCH_V8_7_FEATURES (AARCH64_FEATURE_V8_7 \
                                    | AARCH64_FEATURE_LS64)
#define AARCH64_ARCH_V8_8_FEATURES (AARCH64_FEATURE_V8_8 \
                                    | AARCH64_FEATURE_MOPS \
                                    | AARCH64_FEATURE_HBC)

#define AARCH64_ARCH_V9_FEATURES (AARCH64_FEATURE_V9 \
                                  | AARCH64_FEATURE_SVE \
                                  | AARCH64_FEATURE_SVE2)
#define AARCH64_ARCH_V9_1_FEATURES (AARCH64_ARCH_V8_6_FEATURES)
#define AARCH64_ARCH_V9_2_FEATURES (AARCH64_ARCH_V8_7_FEATURES)
#define AARCH64_ARCH_V9_3_FEATURES (AARCH64_ARCH_V8_8_FEATURES)
/* Architectures are the sum of the base and extensions.  */
#define AARCH64_ARCH_V8 AARCH64_FEATURE (AARCH64_FEATURE_V8, \
                                         AARCH64_ARCH_V8_FEATURES)
#define AARCH64_ARCH_V8_1 AARCH64_FEATURE (AARCH64_ARCH_V8, \
                                           AARCH64_ARCH_V8_1_FEATURES)
#define AARCH64_ARCH_V8_2 AARCH64_FEATURE (AARCH64_ARCH_V8_1, \
                                           AARCH64_ARCH_V8_2_FEATURES)
#define AARCH64_ARCH_V8_3 AARCH64_FEATURE (AARCH64_ARCH_V8_2, \
                                           AARCH64_ARCH_V8_3_FEATURES)
#define AARCH64_ARCH_V8_4 AARCH64_FEATURE (AARCH64_ARCH_V8_3, \
                                           AARCH64_ARCH_V8_4_FEATURES)
#define AARCH64_ARCH_V8_5 AARCH64_FEATURE (AARCH64_ARCH_V8_4, \
                                           AARCH64_ARCH_V8_5_FEATURES)
#define AARCH64_ARCH_V8_6 AARCH64_FEATURE (AARCH64_ARCH_V8_5, \
                                           AARCH64_ARCH_V8_6_FEATURES)
#define AARCH64_ARCH_V8_7 AARCH64_FEATURE (AARCH64_ARCH_V8_6, \
                                           AARCH64_ARCH_V8_7_FEATURES)
#define AARCH64_ARCH_V8_8 AARCH64_FEATURE (AARCH64_ARCH_V8_7, \
                                           AARCH64_ARCH_V8_8_FEATURES)
#define AARCH64_ARCH_V8_R (AARCH64_FEATURE (AARCH64_ARCH_V8_4, \
                                            AARCH64_FEATURE_V8_R) \
                           & ~(AARCH64_FEATURE_V8_A | AARCH64_FEATURE_LOR))

#define AARCH64_ARCH_V9 AARCH64_FEATURE (AARCH64_ARCH_V8_5, \
                                         AARCH64_ARCH_V9_FEATURES)
#define AARCH64_ARCH_V9_1 AARCH64_FEATURE (AARCH64_ARCH_V9, \
                                           AARCH64_ARCH_V9_1_FEATURES)
#define AARCH64_ARCH_V9_2 AARCH64_FEATURE (AARCH64_ARCH_V9_1, \
                                           AARCH64_ARCH_V9_2_FEATURES)
#define AARCH64_ARCH_V9_3 AARCH64_FEATURE (AARCH64_ARCH_V9_2, \
                                           AARCH64_ARCH_V9_3_FEATURES)

#define AARCH64_ARCH_NONE AARCH64_FEATURE (0, 0)
#define AARCH64_ANY AARCH64_FEATURE (-1, 0) /* Any basic core.  */
/* CPU-specific features.  */
typedef unsigned long long aarch64_feature_set;
#define AARCH64_CPU_HAS_ALL_FEATURES(CPU,FEAT) \
  ((~(CPU) & (FEAT)) == 0)

#define AARCH64_CPU_HAS_ANY_FEATURES(CPU,FEAT) \
  (((CPU) & (FEAT)) != 0)

#define AARCH64_CPU_HAS_FEATURE(CPU,FEAT) \
  AARCH64_CPU_HAS_ALL_FEATURES (CPU,FEAT)
#define AARCH64_MERGE_FEATURE_SETS(TARG,F1,F2) \
  do                                           \
    {                                          \
      (TARG) = (F1) | (F2);                    \
    }                                          \
  while (0)

#define AARCH64_CLEAR_FEATURE(TARG,F1,F2) \
  do                                      \
    {                                     \
      (TARG) = (F1) &~ (F2);              \
    }                                     \
  while (0)

#define AARCH64_FEATURE(core,coproc) ((core) | (coproc))
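
/* Illustrative sketch only (not part of the binutils interface): how a
   client might combine and test the feature-set macros above.  The variable
   name "cpu_variant" is hypothetical.

     aarch64_feature_set cpu_variant = AARCH64_ARCH_V8_2;
     AARCH64_MERGE_FEATURE_SETS (cpu_variant, cpu_variant, AARCH64_FEATURE_SVE);
     AARCH64_CPU_HAS_FEATURE (cpu_variant, AARCH64_FEATURE_FP);
       (true: FP is part of the v8 base features)
     AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, AARCH64_FEATURE_CRYPTO);
       (false: neither AES nor SHA2 was enabled)
     AARCH64_CLEAR_FEATURE (cpu_variant, cpu_variant, AARCH64_FEATURE_SVE);  */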
enum aarch64_operand_class
{
  AARCH64_OPND_CLASS_NIL,
  AARCH64_OPND_CLASS_INT_REG,
  AARCH64_OPND_CLASS_MODIFIED_REG,
  AARCH64_OPND_CLASS_FP_REG,
  AARCH64_OPND_CLASS_SIMD_REG,
  AARCH64_OPND_CLASS_SIMD_ELEMENT,
  AARCH64_OPND_CLASS_SISD_REG,
  AARCH64_OPND_CLASS_SIMD_REGLIST,
  AARCH64_OPND_CLASS_SVE_REG,
  AARCH64_OPND_CLASS_PRED_REG,
  AARCH64_OPND_CLASS_ADDRESS,
  AARCH64_OPND_CLASS_IMMEDIATE,
  AARCH64_OPND_CLASS_SYSTEM,
  AARCH64_OPND_CLASS_COND,
};
/* Operand code that helps both parsing and coding.
   Keep AARCH64_OPERANDS synced.  */

enum aarch64_opnd
{
  AARCH64_OPND_NIL, /* no operand---MUST BE FIRST!  */
  AARCH64_OPND_Rd, /* Integer register as destination.  */
  AARCH64_OPND_Rn, /* Integer register as source.  */
  AARCH64_OPND_Rm, /* Integer register as source.  */
  AARCH64_OPND_Rt, /* Integer register used in ld/st instructions.  */
  AARCH64_OPND_Rt2, /* Integer register used in ld/st pair instructions.  */
  AARCH64_OPND_Rt_LS64, /* Integer register used in LS64 instructions.  */
  AARCH64_OPND_Rt_SP, /* Integer Rt or SP used in STG instructions.  */
  AARCH64_OPND_Rs, /* Integer register used in ld/st exclusive.  */
  AARCH64_OPND_Ra, /* Integer register used in ddp_3src instructions.  */
  AARCH64_OPND_Rt_SYS, /* Integer register used in system instructions.  */

  AARCH64_OPND_Rd_SP, /* Integer Rd or SP.  */
  AARCH64_OPND_Rn_SP, /* Integer Rn or SP.  */
  AARCH64_OPND_Rm_SP, /* Integer Rm or SP.  */
  AARCH64_OPND_PAIRREG, /* Paired register operand.  */
  AARCH64_OPND_Rm_EXT, /* Integer Rm extended.  */
  AARCH64_OPND_Rm_SFT, /* Integer Rm shifted.  */

  AARCH64_OPND_Fd, /* Floating-point Fd.  */
  AARCH64_OPND_Fn, /* Floating-point Fn.  */
  AARCH64_OPND_Fm, /* Floating-point Fm.  */
  AARCH64_OPND_Fa, /* Floating-point Fa.  */
  AARCH64_OPND_Ft, /* Floating-point Ft.  */
  AARCH64_OPND_Ft2, /* Floating-point Ft2.  */

  AARCH64_OPND_Sd, /* AdvSIMD Scalar Sd.  */
  AARCH64_OPND_Sn, /* AdvSIMD Scalar Sn.  */
  AARCH64_OPND_Sm, /* AdvSIMD Scalar Sm.  */

  AARCH64_OPND_Va, /* AdvSIMD Vector Va.  */
  AARCH64_OPND_Vd, /* AdvSIMD Vector Vd.  */
  AARCH64_OPND_Vn, /* AdvSIMD Vector Vn.  */
  AARCH64_OPND_Vm, /* AdvSIMD Vector Vm.  */
  AARCH64_OPND_VdD1, /* AdvSIMD <Vd>.D[1]; for FMOV only.  */
  AARCH64_OPND_VnD1, /* AdvSIMD <Vn>.D[1]; for FMOV only.  */
  AARCH64_OPND_Ed, /* AdvSIMD Vector Element Vd.  */
  AARCH64_OPND_En, /* AdvSIMD Vector Element Vn.  */
  AARCH64_OPND_Em, /* AdvSIMD Vector Element Vm.  */
  AARCH64_OPND_Em16, /* AdvSIMD Vector Element Vm restricted to V0 - V15 when
                        qualifier is S_H.  */
  AARCH64_OPND_LVn, /* AdvSIMD Vector register list used in e.g. TBL.  */
  AARCH64_OPND_LVt, /* AdvSIMD Vector register list used in ld/st.  */
  AARCH64_OPND_LVt_AL, /* AdvSIMD Vector register list for loading single
                          structure to all lanes.  */
  AARCH64_OPND_LEt, /* AdvSIMD Vector Element list.  */

  AARCH64_OPND_CRn, /* Co-processor register in CRn field.  */
  AARCH64_OPND_CRm, /* Co-processor register in CRm field.  */

  AARCH64_OPND_IDX, /* AdvSIMD EXT index operand.  */
  AARCH64_OPND_MASK, /* AdvSIMD EXT index operand.  */
  AARCH64_OPND_IMM_VLSL, /* Immediate for shifting vector registers left.  */
  AARCH64_OPND_IMM_VLSR, /* Immediate for shifting vector registers right.  */
  AARCH64_OPND_SIMD_IMM, /* AdvSIMD modified immediate without shift.  */
  AARCH64_OPND_SIMD_IMM_SFT, /* AdvSIMD modified immediate with shift.  */
  AARCH64_OPND_SIMD_FPIMM, /* AdvSIMD 8-bit fp immediate.  */
  AARCH64_OPND_SHLL_IMM, /* Immediate shift for AdvSIMD SHLL instruction
                            (in bits).  */
  AARCH64_OPND_IMM0, /* Immediate for #0.  */
  AARCH64_OPND_FPIMM0, /* Immediate for #0.0.  */
  AARCH64_OPND_FPIMM, /* Floating-point Immediate.  */
  AARCH64_OPND_IMMR, /* Immediate #<immr> in e.g. BFM.  */
  AARCH64_OPND_IMMS, /* Immediate #<imms> in e.g. BFM.  */
  AARCH64_OPND_WIDTH, /* Immediate #<width> in e.g. BFI.  */
  AARCH64_OPND_IMM, /* Immediate.  */
  AARCH64_OPND_IMM_2, /* Immediate.  */
  AARCH64_OPND_UIMM3_OP1, /* Unsigned 3-bit immediate in the op1 field.  */
  AARCH64_OPND_UIMM3_OP2, /* Unsigned 3-bit immediate in the op2 field.  */
  AARCH64_OPND_UIMM4, /* Unsigned 4-bit immediate in the CRm field.  */
  AARCH64_OPND_UIMM4_ADDG, /* Unsigned 4-bit immediate in addg/subg.  */
  AARCH64_OPND_UIMM7, /* Unsigned 7-bit immediate in the CRm:op2 fields.  */
  AARCH64_OPND_UIMM10, /* Unsigned 10-bit immediate in addg/subg.  */
  AARCH64_OPND_BIT_NUM, /* Immediate.  */
  AARCH64_OPND_EXCEPTION, /* imm16 operand in exception instructions.  */
  AARCH64_OPND_UNDEFINED, /* imm16 operand in undefined instruction.  */
  AARCH64_OPND_CCMP_IMM, /* Immediate in conditional compare instructions.  */
  AARCH64_OPND_SIMM5, /* 5-bit signed immediate in the imm5 field.  */
  AARCH64_OPND_NZCV, /* Flag bit specifier giving an alternative value for
                        each condition flag.  */

  AARCH64_OPND_LIMM, /* Logical Immediate.  */
  AARCH64_OPND_AIMM, /* Arithmetic immediate.  */
  AARCH64_OPND_HALF, /* #<imm16>{, LSL #<shift>} operand in move wide.  */
  AARCH64_OPND_FBITS, /* FP #<fbits> operand in e.g. SCVTF */
  AARCH64_OPND_IMM_MOV, /* Immediate operand for the MOV alias.  */
  AARCH64_OPND_IMM_ROT1, /* Immediate rotate operand for FCMLA.  */
  AARCH64_OPND_IMM_ROT2, /* Immediate rotate operand for indexed FCMLA.  */
  AARCH64_OPND_IMM_ROT3, /* Immediate rotate operand for FCADD.  */

  AARCH64_OPND_COND, /* Standard condition as the last operand.  */
  AARCH64_OPND_COND1, /* Same as the above, but excluding AL and NV.  */

  AARCH64_OPND_ADDR_ADRP, /* Memory address for ADRP */
  AARCH64_OPND_ADDR_PCREL14, /* 14-bit PC-relative address for e.g. TBZ.  */
  AARCH64_OPND_ADDR_PCREL19, /* 19-bit PC-relative address for e.g. LDR.  */
  AARCH64_OPND_ADDR_PCREL21, /* 21-bit PC-relative address for e.g. ADR.  */
  AARCH64_OPND_ADDR_PCREL26, /* 26-bit PC-relative address for e.g. BL.  */

  AARCH64_OPND_ADDR_SIMPLE, /* Address of ld/st exclusive.  */
  AARCH64_OPND_ADDR_REGOFF, /* Address of register offset.  */
  AARCH64_OPND_ADDR_SIMM7, /* Address of signed 7-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM9, /* Address of signed 9-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM9_2, /* Same as the above, but the immediate is
                                negative or unaligned and there is
                                no writeback allowed.  This operand code
                                is only used to support the programmer-
                                friendly feature of using LDR/STR as the
                                mnemonic name for LDUR/STUR instructions
                                wherever there is no ambiguity.  */
  AARCH64_OPND_ADDR_SIMM10, /* Address of signed 10-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM11, /* Address with a signed 11-bit (multiple of
                               8) immediate.  */
  AARCH64_OPND_ADDR_UIMM12, /* Address of unsigned 12-bit immediate.  */
  AARCH64_OPND_ADDR_SIMM13, /* Address with a signed 13-bit (multiple of
                               16) immediate.  */
  AARCH64_OPND_SIMD_ADDR_SIMPLE, /* Address of ld/st multiple structures.  */
  AARCH64_OPND_ADDR_OFFSET, /* Address with an optional 9-bit immediate.  */
  AARCH64_OPND_SIMD_ADDR_POST, /* Address of ld/st multiple post-indexed.  */

  AARCH64_OPND_SYSREG, /* System register operand.  */
  AARCH64_OPND_PSTATEFIELD, /* PSTATE field name operand.  */
  AARCH64_OPND_SYSREG_AT, /* System register <at_op> operand.  */
  AARCH64_OPND_SYSREG_DC, /* System register <dc_op> operand.  */
  AARCH64_OPND_SYSREG_IC, /* System register <ic_op> operand.  */
  AARCH64_OPND_SYSREG_TLBI, /* System register <tlbi_op> operand.  */
  AARCH64_OPND_SYSREG_SR, /* System register RCTX operand.  */
  AARCH64_OPND_BARRIER, /* Barrier operand.  */
  AARCH64_OPND_BARRIER_DSB_NXS, /* Barrier operand for DSB nXS variant.  */
  AARCH64_OPND_BARRIER_ISB, /* Barrier operand for ISB.  */
  AARCH64_OPND_PRFOP, /* Prefetch operation.  */
  AARCH64_OPND_BARRIER_PSB, /* Barrier operand for PSB.  */
  AARCH64_OPND_BTI_TARGET, /* BTI {<target>}.  */
  AARCH64_OPND_SVE_ADDR_RI_S4x16, /* SVE [<Xn|SP>, #<simm4>*16].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x32, /* SVE [<Xn|SP>, #<simm4>*32].  */
  AARCH64_OPND_SVE_ADDR_RI_S4xVL, /* SVE [<Xn|SP>, #<simm4>, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x2xVL, /* SVE [<Xn|SP>, #<simm4>*2, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, /* SVE [<Xn|SP>, #<simm4>*3, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S4x4xVL, /* SVE [<Xn|SP>, #<simm4>*4, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S6xVL, /* SVE [<Xn|SP>, #<simm6>, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_S9xVL, /* SVE [<Xn|SP>, #<simm9>, MUL VL].  */
  AARCH64_OPND_SVE_ADDR_RI_U6, /* SVE [<Xn|SP>, #<uimm6>].  */
  AARCH64_OPND_SVE_ADDR_RI_U6x2, /* SVE [<Xn|SP>, #<uimm6>*2].  */
  AARCH64_OPND_SVE_ADDR_RI_U6x4, /* SVE [<Xn|SP>, #<uimm6>*4].  */
  AARCH64_OPND_SVE_ADDR_RI_U6x8, /* SVE [<Xn|SP>, #<uimm6>*8].  */
  AARCH64_OPND_SVE_ADDR_R, /* SVE [<Xn|SP>].  */
  AARCH64_OPND_SVE_ADDR_RR, /* SVE [<Xn|SP>, <Xm|XZR>].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL1, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #1].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL2, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #2].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL3, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #3].  */
  AARCH64_OPND_SVE_ADDR_RR_LSL4, /* SVE [<Xn|SP>, <Xm|XZR>, LSL #4].  */
  AARCH64_OPND_SVE_ADDR_RX, /* SVE [<Xn|SP>, <Xm>].  */
  AARCH64_OPND_SVE_ADDR_RX_LSL1, /* SVE [<Xn|SP>, <Xm>, LSL #1].  */
  AARCH64_OPND_SVE_ADDR_RX_LSL2, /* SVE [<Xn|SP>, <Xm>, LSL #2].  */
  AARCH64_OPND_SVE_ADDR_RX_LSL3, /* SVE [<Xn|SP>, <Xm>, LSL #3].  */
  AARCH64_OPND_SVE_ADDR_ZX, /* SVE [Zn.<T>{, <Xm>}].  */
  AARCH64_OPND_SVE_ADDR_RZ, /* SVE [<Xn|SP>, Zm.D].  */
  AARCH64_OPND_SVE_ADDR_RZ_LSL1, /* SVE [<Xn|SP>, Zm.D, LSL #1].  */
  AARCH64_OPND_SVE_ADDR_RZ_LSL2, /* SVE [<Xn|SP>, Zm.D, LSL #2].  */
  AARCH64_OPND_SVE_ADDR_RZ_LSL3, /* SVE [<Xn|SP>, Zm.D, LSL #3].  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
                                      Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW].
                                      Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW1_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
                                       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW1_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #1].
                                       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW2_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
                                       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW2_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #2].
                                       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW3_14, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
                                       Bit 14 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_RZ_XTW3_22, /* SVE [<Xn|SP>, Zm.<T>, (S|U)XTW #3].
                                       Bit 22 controls S/U choice.  */
  AARCH64_OPND_SVE_ADDR_ZI_U5, /* SVE [Zn.<T>, #<uimm5>].  */
  AARCH64_OPND_SVE_ADDR_ZI_U5x2, /* SVE [Zn.<T>, #<uimm5>*2].  */
  AARCH64_OPND_SVE_ADDR_ZI_U5x4, /* SVE [Zn.<T>, #<uimm5>*4].  */
  AARCH64_OPND_SVE_ADDR_ZI_U5x8, /* SVE [Zn.<T>, #<uimm5>*8].  */
  AARCH64_OPND_SVE_ADDR_ZZ_LSL, /* SVE [Zn.<T>, Zm.<T>, LSL #<msz>].  */
  AARCH64_OPND_SVE_ADDR_ZZ_SXTW, /* SVE [Zn.<T>, Zm.<T>, SXTW #<msz>].  */
  AARCH64_OPND_SVE_ADDR_ZZ_UXTW, /* SVE [Zn.<T>, Zm.<T>, UXTW #<msz>].  */
  AARCH64_OPND_SVE_AIMM, /* SVE unsigned arithmetic immediate.  */
  AARCH64_OPND_SVE_ASIMM, /* SVE signed arithmetic immediate.  */
  AARCH64_OPND_SVE_FPIMM8, /* SVE 8-bit floating-point immediate.  */
  AARCH64_OPND_SVE_I1_HALF_ONE, /* SVE choice between 0.5 and 1.0.  */
  AARCH64_OPND_SVE_I1_HALF_TWO, /* SVE choice between 0.5 and 2.0.  */
  AARCH64_OPND_SVE_I1_ZERO_ONE, /* SVE choice between 0.0 and 1.0.  */
  AARCH64_OPND_SVE_IMM_ROT1, /* SVE 1-bit rotate operand (90 or 270).  */
  AARCH64_OPND_SVE_IMM_ROT2, /* SVE 2-bit rotate operand (N*90).  */
  AARCH64_OPND_SVE_IMM_ROT3, /* SVE cadd 1-bit rotate (90 or 270).  */
  AARCH64_OPND_SVE_INV_LIMM, /* SVE inverted logical immediate.  */
  AARCH64_OPND_SVE_LIMM, /* SVE logical immediate.  */
  AARCH64_OPND_SVE_LIMM_MOV, /* SVE logical immediate for MOV.  */
  AARCH64_OPND_SVE_PATTERN, /* SVE vector pattern enumeration.  */
  AARCH64_OPND_SVE_PATTERN_SCALED, /* Likewise, with additional MUL factor.  */
  AARCH64_OPND_SVE_PRFOP, /* SVE prefetch operation.  */
  AARCH64_OPND_SVE_Pd, /* SVE p0-p15 in Pd.  */
  AARCH64_OPND_SVE_Pg3, /* SVE p0-p7 in Pg.  */
  AARCH64_OPND_SVE_Pg4_5, /* SVE p0-p15 in Pg, bits [8,5].  */
  AARCH64_OPND_SVE_Pg4_10, /* SVE p0-p15 in Pg, bits [13,10].  */
  AARCH64_OPND_SVE_Pg4_16, /* SVE p0-p15 in Pg, bits [19,16].  */
  AARCH64_OPND_SVE_Pm, /* SVE p0-p15 in Pm.  */
  AARCH64_OPND_SVE_Pn, /* SVE p0-p15 in Pn.  */
  AARCH64_OPND_SVE_Pt, /* SVE p0-p15 in Pt.  */
  AARCH64_OPND_SVE_Rm, /* Integer Rm or ZR, alt. SVE position.  */
  AARCH64_OPND_SVE_Rn_SP, /* Integer Rn or SP, alt. SVE position.  */
  AARCH64_OPND_SVE_SHLIMM_PRED, /* SVE shift left amount (predicated).  */
  AARCH64_OPND_SVE_SHLIMM_UNPRED, /* SVE shift left amount (unpredicated).  */
  AARCH64_OPND_SVE_SHLIMM_UNPRED_22, /* SVE 3 bit shift left unpred.  */
  AARCH64_OPND_SVE_SHRIMM_PRED, /* SVE shift right amount (predicated).  */
  AARCH64_OPND_SVE_SHRIMM_UNPRED, /* SVE shift right amount (unpredicated).  */
  AARCH64_OPND_SVE_SHRIMM_UNPRED_22, /* SVE 3 bit shift right unpred.  */
  AARCH64_OPND_SVE_SIMM5, /* SVE signed 5-bit immediate.  */
  AARCH64_OPND_SVE_SIMM5B, /* SVE secondary signed 5-bit immediate.  */
  AARCH64_OPND_SVE_SIMM6, /* SVE signed 6-bit immediate.  */
  AARCH64_OPND_SVE_SIMM8, /* SVE signed 8-bit immediate.  */
  AARCH64_OPND_SVE_UIMM3, /* SVE unsigned 3-bit immediate.  */
  AARCH64_OPND_SVE_UIMM7, /* SVE unsigned 7-bit immediate.  */
  AARCH64_OPND_SVE_UIMM8, /* SVE unsigned 8-bit immediate.  */
  AARCH64_OPND_SVE_UIMM8_53, /* SVE split unsigned 8-bit immediate.  */
  AARCH64_OPND_SVE_VZn, /* Scalar SIMD&FP register in Zn field.  */
  AARCH64_OPND_SVE_Vd, /* Scalar SIMD&FP register in Vd.  */
  AARCH64_OPND_SVE_Vm, /* Scalar SIMD&FP register in Vm.  */
  AARCH64_OPND_SVE_Vn, /* Scalar SIMD&FP register in Vn.  */
  AARCH64_OPND_SVE_Za_5, /* SVE vector register in Za, bits [9,5].  */
  AARCH64_OPND_SVE_Za_16, /* SVE vector register in Za, bits [20,16].  */
  AARCH64_OPND_SVE_Zd, /* SVE vector register in Zd.  */
  AARCH64_OPND_SVE_Zm_5, /* SVE vector register in Zm, bits [9,5].  */
  AARCH64_OPND_SVE_Zm_16, /* SVE vector register in Zm, bits [20,16].  */
  AARCH64_OPND_SVE_Zm3_INDEX, /* z0-z7[0-3] in Zm, bits [20,16].  */
  AARCH64_OPND_SVE_Zm3_22_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 22.  */
  AARCH64_OPND_SVE_Zm3_11_INDEX, /* z0-z7[0-7] in Zm3_INDEX plus bit 11.  */
  AARCH64_OPND_SVE_Zm4_11_INDEX, /* z0-z15[0-3] in Zm plus bit 11.  */
  AARCH64_OPND_SVE_Zm4_INDEX, /* z0-z15[0-1] in Zm, bits [20,16].  */
  AARCH64_OPND_SVE_Zn, /* SVE vector register in Zn.  */
  AARCH64_OPND_SVE_Zn_INDEX, /* Indexed SVE vector register, for DUP.  */
  AARCH64_OPND_SVE_ZnxN, /* SVE vector register list in Zn.  */
  AARCH64_OPND_SVE_Zt, /* SVE vector register in Zt.  */
  AARCH64_OPND_SVE_ZtxN, /* SVE vector register list in Zt.  */
  AARCH64_OPND_SME_ZAda_2b, /* SME <ZAda>.S, 2-bits.  */
  AARCH64_OPND_SME_ZAda_3b, /* SME <ZAda>.D, 3-bits.  */
  AARCH64_OPND_SME_ZA_HV_idx_src, /* SME source ZA tile vector.  */
  AARCH64_OPND_SME_ZA_HV_idx_dest, /* SME destination ZA tile vector.  */
  AARCH64_OPND_SME_Pm, /* SME scalable predicate register, bits [15:13].  */
  AARCH64_OPND_SME_list_of_64bit_tiles, /* SME list of ZA tiles.  */
  AARCH64_OPND_SME_ZA_HV_idx_ldstr, /* SME destination ZA tile vector.  */
  AARCH64_OPND_SME_ZA_array, /* SME ZA[<Wv>{, #<imm>}].  */
  AARCH64_OPND_SME_ADDR_RI_U4xVL, /* SME [<Xn|SP>{, #<imm>, MUL VL}].  */
  AARCH64_OPND_SME_SM_ZA, /* SME {SM | ZA}.  */
  AARCH64_OPND_SME_PnT_Wm_imm, /* SME <Pn>.<T>[<Wm>, #<imm>].  */
  AARCH64_OPND_TME_UIMM16, /* TME unsigned 16-bit immediate.  */
  AARCH64_OPND_SM3_IMM2, /* SM3 encodes lane in bits [13, 14].  */
  AARCH64_OPND_MOPS_ADDR_Rd, /* [Rd]!, in bits [0, 4].  */
  AARCH64_OPND_MOPS_ADDR_Rs, /* [Rs]!, in bits [16, 20].  */
  AARCH64_OPND_MOPS_WB_Rn /* Rn!, in bits [5, 9].  */
};
/* Qualifier constrains an operand.  It either specifies a variant of an
   operand type or limits values available to an operand type.

   N.B. Order is important; keep aarch64_opnd_qualifiers synced.  */
enum aarch64_opnd_qualifier
{
  /* Indicating no further qualification on an operand.  */
  AARCH64_OPND_QLF_NIL,

  /* Qualifying an operand which is a general purpose (integer) register;
     indicating the operand data size or a specific register.  */
  AARCH64_OPND_QLF_W, /* Wn, WZR or WSP.  */
  AARCH64_OPND_QLF_X, /* Xn, XZR or XSP.  */
  AARCH64_OPND_QLF_WSP, /* WSP.  */
  AARCH64_OPND_QLF_SP, /* SP.  */
  /* Qualifying an operand which is a floating-point register, a SIMD
     vector element or a SIMD vector element list; indicating operand data
     size or the size of each SIMD vector element in the case of a SIMD
     vector element list.
     These qualifiers are also used to qualify an address operand to
     indicate the size of data element a load/store instruction is
     operating on.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_S_B,
  AARCH64_OPND_QLF_S_H,
  AARCH64_OPND_QLF_S_S,
  AARCH64_OPND_QLF_S_D,
  AARCH64_OPND_QLF_S_Q,
  /* These type qualifiers have a special meaning in that they mean 4 x 1 byte
     or 2 x 2 byte are selected by the instruction.  Other than that they have
     no difference with AARCH64_OPND_QLF_S_B in encoding.  They are here purely
     for syntactical reasons and are an exception from the normal AArch64
     disassembly scheme.  */
  AARCH64_OPND_QLF_S_4B,
  AARCH64_OPND_QLF_S_2H,
  /* Qualifying an operand which is a SIMD vector register or a SIMD vector
     register list; indicating register shape.
     They are also used for the immediate shift operand in e.g. SSHR.  Such
     a use is only for the ease of operand encoding/decoding and qualifier
     sequence matching; such a use should not be applied widely; use the value
     constraint qualifiers for immediate operands wherever possible.  */
  AARCH64_OPND_QLF_V_4B,
  AARCH64_OPND_QLF_V_8B,
  AARCH64_OPND_QLF_V_16B,
  AARCH64_OPND_QLF_V_2H,
  AARCH64_OPND_QLF_V_4H,
  AARCH64_OPND_QLF_V_8H,
  AARCH64_OPND_QLF_V_2S,
  AARCH64_OPND_QLF_V_4S,
  AARCH64_OPND_QLF_V_1D,
  AARCH64_OPND_QLF_V_2D,
  AARCH64_OPND_QLF_V_1Q,

  AARCH64_OPND_QLF_P_Z,
  AARCH64_OPND_QLF_P_M,
  /* Used in scaled signed immediates that are scaled by a Tag granule,
     as in stg, st2g, etc.  */
  AARCH64_OPND_QLF_imm_tag,
  /* Constraint on value.  */
  AARCH64_OPND_QLF_CR, /* CRn, CRm.  */
  AARCH64_OPND_QLF_imm_0_7,
  AARCH64_OPND_QLF_imm_0_15,
  AARCH64_OPND_QLF_imm_0_31,
  AARCH64_OPND_QLF_imm_0_63,
  AARCH64_OPND_QLF_imm_1_32,
  AARCH64_OPND_QLF_imm_1_64,
  /* Indicate whether an AdvSIMD modified immediate operand is shift-zeros
     or shift-ones.  */
  AARCH64_OPND_QLF_LSL,
  AARCH64_OPND_QLF_MSL,
  /* Special qualifier helping retrieve qualifier information during the
     decoding time (currently not in use).  */
  AARCH64_OPND_QLF_RETRIEVE,
};
/* Instruction class.  */

enum aarch64_insn_class
  ldst_imm9, /* immpost or immpre */
  ldst_imm10, /* LDRAA/LDRAB */
/* Opcode enumerators.  */

  OP_MOV_IMM_LOG, /* MOV alias for moving bitmask immediate.  */
  OP_MOV_IMM_WIDE, /* MOV alias for moving wide immediate.  */
  OP_MOV_IMM_WIDEN, /* MOV alias for moving wide immediate (negated).  */

  OP_MOV_V, /* MOV alias for moving vector register.  */

  OP_BFC, /* ARMv8.2.  */

  OP_FCVTXN_S, /* Scalar version.  */

  OP_FCMLA_ELEM, /* ARMv8.3, indexed element version.  */

  OP_TOTAL_NUM, /* Pseudo.  */
/* Maximum number of operands an instruction can have.  */
#define AARCH64_MAX_OPND_NUM 6
/* Maximum number of qualifier sequences an instruction can have.  */
#define AARCH64_MAX_QLF_SEQ_NUM 10
/* Operand qualifier typedef; optimized for the size.  */
typedef unsigned char aarch64_opnd_qualifier_t;
/* Operand qualifier sequence typedef.  */
typedef aarch64_opnd_qualifier_t \
          aarch64_opnd_qualifier_seq_t[AARCH64_MAX_OPND_NUM];
/* FIXME: improve the efficiency.  */
static inline bool
empty_qualifier_sequence_p (const aarch64_opnd_qualifier_t *qualifiers)
{
  int i;
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    if (qualifiers[i] != AARCH64_OPND_QLF_NIL)
      return false;
  return true;
}
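
/* Illustrative sketch only (not part of the interface): the rows of an
   opcode's qualifiers_list (declared further below) are typically scanned
   until an empty sequence marks the end of the valid variants; "opcode" is
   a hypothetical pointer to an aarch64_opcode.

     int i;
     for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
       {
         if (empty_qualifier_sequence_p (opcode->qualifiers_list[i]))
           break;
         (examine the I-th qualifier sequence here)
       }
*/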
/* Forward declare error reporting type.  */
typedef struct aarch64_operand_error aarch64_operand_error;
/* Forward declare instruction sequence type.  */
typedef struct aarch64_instr_sequence aarch64_instr_sequence;
/* Forward declare instruction definition.  */
typedef struct aarch64_inst aarch64_inst;
/* This structure holds information for a particular opcode.  */

struct aarch64_opcode
{
  /* The name of the mnemonic.  */
  const char *name;

  /* The opcode itself.  Those bits which will be filled in with
     operands are zeroes.  */
  aarch64_insn opcode;

  /* The opcode mask.  This is used by the disassembler.  This is a
     mask containing ones indicating those bits which must match the
     opcode field, and zeroes indicating those bits which need not
     match (and are presumably filled in by operands).  */
  aarch64_insn mask;

  /* Instruction class.  */
  enum aarch64_insn_class iclass;

  /* Enumerator identifier.  */
  enum aarch64_op op;

  /* Which architecture variant provides this instruction.  */
  const aarch64_feature_set *avariant;

  /* An array of operand codes.  Each code is an index into the
     operand table.  They appear in the order which the operands must
     appear in assembly code, and are terminated by a zero.  */
  enum aarch64_opnd operands[AARCH64_MAX_OPND_NUM];

  /* A list of operand qualifier code sequence.  Each operand qualifier
     code qualifies the corresponding operand code.  Each operand
     qualifier sequence specifies a valid opcode variant and related
     constraint on operands.  */
  aarch64_opnd_qualifier_seq_t qualifiers_list[AARCH64_MAX_QLF_SEQ_NUM];

  /* Flags providing information about this instruction.  */
  uint64_t flags;

  /* Extra constraints on the instruction that the verifier checks.  */
  uint32_t constraints;

  /* If nonzero, this operand and operand 0 are both registers and
     are required to have the same register number.  */
  unsigned char tied_operand;

  /* If non-NULL, a function to verify that a given instruction is valid.  */
  enum err_type (* verifier) (const struct aarch64_inst *, const aarch64_insn,
                              bfd_vma, bool, aarch64_operand_error *,
                              struct aarch64_instr_sequence *);
};

typedef struct aarch64_opcode aarch64_opcode;

/* Table describing all the AArch64 opcodes.  */
extern const aarch64_opcode aarch64_opcode_table[];
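
/* Illustrative sketch only (not part of the binutils interface): an entry of
   the table above is usable for a given enabled feature set when all of the
   features it requires are present.  The function and parameter names below
   are hypothetical.

     static bool
     example_opcode_available_p (const aarch64_opcode *entry,
                                 aarch64_feature_set cpu_variant)
     {
       return AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *entry->avariant);
     }
*/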
#define F_ALIAS (1 << 0)
#define F_HAS_ALIAS (1 << 1)
/* Disassembly preference priority 1-3 (the larger the value, the higher the
   priority).  If nothing is specified, the priority is 0 by default, i.e. the
   lowest priority.  */
#define F_P1 (1 << 2)
#define F_P2 (2 << 2)
#define F_P3 (3 << 2)
/* Flag an instruction that is truly conditionally executed, e.g. b.cond.  */
#define F_COND (1 << 4)
/* Instruction has the field of 'sf'.  */
#define F_SF (1 << 5)
/* Instruction has the field of 'size:Q'.  */
#define F_SIZEQ (1 << 6)
/* Floating-point instruction has the field of 'type'.  */
#define F_FPTYPE (1 << 7)
/* AdvSIMD scalar instruction has the field of 'size'.  */
#define F_SSIZE (1 << 8)
/* AdvSIMD vector register arrangement specifier encoded in "imm5<3:0>:Q".  */
#define F_T (1 << 9)
/* Size of GPR operand in AdvSIMD instructions encoded in Q.  */
#define F_GPRSIZE_IN_Q (1 << 10)
/* Size of Rt load signed instruction encoded in opc[0], i.e. bit 22.  */
#define F_LDS_SIZE (1 << 11)
/* Optional operand; assume maximum of 1 operand can be optional.  */
#define F_OPD0_OPT (1 << 12)
#define F_OPD1_OPT (2 << 12)
#define F_OPD2_OPT (3 << 12)
#define F_OPD3_OPT (4 << 12)
#define F_OPD4_OPT (5 << 12)
/* Default value for the optional operand when omitted from the assembly.  */
#define F_DEFAULT(X) (((X) & 0x1f) << 15)
/* An instruction that is an alias of another instruction needs to be
   encoded/decoded by converting it to/from the real form, followed by
   the encoding/decoding according to the rules of the real opcode.
   This compares to the direct coding using the alias's information.
   N.B. this flag requires F_ALIAS to be used together.  */
#define F_CONV (1 << 20)
/* Use together with F_ALIAS to indicate an alias opcode is a programmer
   friendly pseudo instruction available only in the assembly code (thus will
   not show up in the disassembly).  */
#define F_PSEUDO (1 << 21)
/* Instruction has miscellaneous encoding/decoding rules.  */
#define F_MISC (1 << 22)
/* Instruction has the field of 'N'; used in conjunction with F_SF.  */
#define F_N (1 << 23)
/* Opcode dependent field.  */
#define F_OD(X) (((X) & 0x7) << 24)
/* Instruction has the field of 'sz'.  */
#define F_LSE_SZ (1 << 27)
/* Require an exact qualifier match, even for NIL qualifiers.  */
#define F_STRICT (1ULL << 28)
/* This system instruction is used to read system registers.  */
#define F_SYS_READ (1ULL << 29)
/* This system instruction is used to write system registers.  */
#define F_SYS_WRITE (1ULL << 30)
/* This instruction has an extra constraint on it that imposes a requirement on
   subsequent instructions.  */
#define F_SCAN (1ULL << 31)
/* Next bit is 32.  */
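
/* Illustrative sketch only (hypothetical values, not taken from the opcode
   table): an entry whose third operand is optional and defaults to #0, and
   which also has a higher-priority alias, might carry

     F_HAS_ALIAS | F_P1 | F_OPD2_OPT | F_DEFAULT (0)

   in its flags, i.e. bits 12-14 hold the optional-operand index plus one and
   bits 15-19 hold the default value, as decoded by the helpers further
   below.  */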
/* Instruction constraints.  */
/* This instruction has a predication constraint on the instruction at PC+4.  */
#define C_SCAN_MOVPRFX (1U << 0)
/* This instruction's operation width is determined by the operand with the
   largest element size.  */
#define C_MAX_ELEM (1U << 1)
#define C_SCAN_MOPS_P (1U << 2)
#define C_SCAN_MOPS_M (2U << 2)
#define C_SCAN_MOPS_E (3U << 2)
#define C_SCAN_MOPS_PME (3U << 2)
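
/* Illustrative sketch only: the MOPS values share a 2-bit field in the
   constraints word, so a checker would typically mask with C_SCAN_MOPS_PME
   before comparing, e.g.

     (opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_M

   to test for the middle part of a memory-operations sequence; "opcode" is a
   hypothetical pointer to an aarch64_opcode.  */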
static inline bool
alias_opcode_p (const aarch64_opcode *opcode)
{
  return (opcode->flags & F_ALIAS) != 0;
}

static inline bool
opcode_has_alias (const aarch64_opcode *opcode)
{
  return (opcode->flags & F_HAS_ALIAS) != 0;
}

/* Priority for disassembling preference.  */
static inline int
opcode_priority (const aarch64_opcode *opcode)
{
  return (opcode->flags >> 2) & 0x3;
}

static inline bool
pseudo_opcode_p (const aarch64_opcode *opcode)
{
  return (opcode->flags & F_PSEUDO) != 0lu;
}

static inline bool
optional_operand_p (const aarch64_opcode *opcode, unsigned int idx)
{
  return ((opcode->flags >> 12) & 0x7) == idx + 1;
}

static inline aarch64_insn
get_optional_operand_default_value (const aarch64_opcode *opcode)
{
  return (opcode->flags >> 15) & 0x1f;
}

static inline unsigned int
get_opcode_dependent_value (const aarch64_opcode *opcode)
{
  return (opcode->flags >> 24) & 0x7;
}

static inline bool
opcode_has_special_coder (const aarch64_opcode *opcode)
{
  return (opcode->flags & (F_SF | F_LSE_SZ | F_SIZEQ | F_FPTYPE | F_SSIZE | F_T
          | F_GPRSIZE_IN_Q | F_LDS_SIZE | F_MISC | F_N | F_COND)) != 0;
}
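
/* Illustrative sketch only (hypothetical flags value): for an opcode entry
   whose flags contain F_HAS_ALIAS | F_P1 | F_OPD2_OPT | F_DEFAULT (0), the
   helpers above yield

     opcode_has_alias (opc)                    -> true
     opcode_priority (opc)                     -> 1
     optional_operand_p (opc, 2)               -> true
     get_optional_operand_default_value (opc)  -> 0

   where "opc" is a hypothetical pointer to that entry.  */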
struct aarch64_name_value_pair
{
  const char *name;
  aarch64_insn value;
};
extern const struct aarch64_name_value_pair aarch64_operand_modifiers[];
extern const struct aarch64_name_value_pair aarch64_barrier_options[16];
extern const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4];
extern const struct aarch64_name_value_pair aarch64_prfops[32];
extern const struct aarch64_name_value_pair aarch64_hint_options[];
#define AARCH64_MAX_SYSREG_NAME_LEN 32

  /* A set of features, all of which are required for this system register to
     be available.  */
  aarch64_feature_set features;
extern const aarch64_sys_reg aarch64_sys_regs[];
extern const aarch64_sys_reg aarch64_pstatefields[];
extern bool aarch64_sys_reg_deprecated_p (const uint32_t);
extern bool aarch64_pstatefield_supported_p (const aarch64_feature_set,
                                             const aarch64_sys_reg *);

} aarch64_sys_ins_reg;

extern bool aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *);
extern bool
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set,
                                 const char *reg_name, aarch64_insn,
                                 uint32_t, aarch64_feature_set);

extern const aarch64_sys_ins_reg aarch64_sys_regs_ic[];
extern const aarch64_sys_ins_reg aarch64_sys_regs_dc[];
extern const aarch64_sys_ins_reg aarch64_sys_regs_at[];
extern const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[];
extern const aarch64_sys_ins_reg aarch64_sys_regs_sr[];
/* Shift/extending operator kinds.
   N.B. order is important; keep aarch64_operand_modifiers synced.  */
enum aarch64_modifier_kind

aarch64_extend_operator_p (enum aarch64_modifier_kind);

enum aarch64_modifier_kind
aarch64_get_operand_modifier (const struct aarch64_name_value_pair *);
  /* A list of names with the first one as the disassembly preference;
     terminated by NULL if fewer than 3.  */
  const char *names[4];

extern const aarch64_cond aarch64_conds[16];

const aarch64_cond* get_cond_from_value (aarch64_insn value);
const aarch64_cond* get_inverted_cond (const aarch64_cond *cond);
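
/* Illustrative sketch only: condition code 0x0 encodes EQ, so

     get_inverted_cond (get_cond_from_value (0x0))

   returns the aarch64_cond describing NE.  */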
/* Structure representing an operand.  */

struct aarch64_opnd_info
  enum aarch64_opnd type;
  aarch64_opnd_qualifier_t qualifier;

      unsigned first_regno : 5;
      unsigned num_regs : 3;
      /* 1 if it is a list of reg element.  */
      unsigned has_index : 1;
      /* Lane index; valid only when has_index is 1.  */

    /* e.g. immediate or pc relative address offset.  */

    /* e.g. address in STR (register offset).  */
      unsigned base_regno;

      unsigned pcrel : 1;     /* PC-relative.  */
      unsigned writeback : 1;
      unsigned preind : 1;    /* Pre-indexed.  */
      unsigned postind : 1;   /* Post-indexed.  */

    /* The encoding of the system register.  */

    /* The system register flags.  */

    /* ZA tile vector, e.g. <ZAn><HV>.D[<Wv>{, <imm>}] */
      int regno;      /* <ZAn> */
      int regno;      /* <Wv> */
      int imm;        /* <imm> */
      unsigned v : 1; /* <HV> horizontal or vertical vector indicator.  */

    const aarch64_cond *cond;
    /* The encoding of the PSTATE field.  */
    aarch64_insn pstatefield;
    const aarch64_sys_ins_reg *sysins_op;
    const struct aarch64_name_value_pair *barrier;
    const struct aarch64_name_value_pair *hint_option;
    const struct aarch64_name_value_pair *prfop;

  /* Operand shifter; in use when the operand is a register offset address,
     add/sub extended reg, etc.  e.g. <R><m>{, <extend> {#<amount>}}.  */
    enum aarch64_modifier_kind kind;
    unsigned operator_present : 1; /* Only valid during encoding.  */
    /* Value of the 'S' field in ld/st reg offset; used only in decoding.  */
    unsigned amount_present : 1;
  unsigned skip : 1;    /* Operand is not completed if there is a fixup needed
                           to be done on it.  In some (but not all) of these
                           cases, we need to tell libopcodes to skip the
                           constraint checking and the encoding for this
                           operand, so that libopcodes can pick up the
                           right opcode before the operand is fixed-up.  This
                           flag should only be used during the
                           assembling/encoding.  */
  unsigned present : 1; /* Whether this operand is present in the assembly
                           line; not used during the disassembly.  */

typedef struct aarch64_opnd_info aarch64_opnd_info;
/* Structure representing an instruction.

   It is used during both the assembling and disassembling.  The assembler
   fills an aarch64_inst after a successful parsing and then passes it to the
   encoding routine to do the encoding.  During the disassembling, the
   disassembler calls the decoding routine to decode a binary instruction; on a
   successful return, such a structure will be filled with information of the
   instruction; then the disassembler uses the information to print out the
   instruction.  */

  /* The value of the binary instruction.  */

  /* Corresponding opcode entry.  */
  const aarch64_opcode *opcode;

  /* Condition for a truly conditionally executed instruction, e.g. b.cond.  */
  const aarch64_cond *cond;

  /* Operands information.  */
  aarch64_opnd_info operands[AARCH64_MAX_OPND_NUM];
/* Defining the HINT #imm values for the aarch64_hint_options.  */
#define HINT_OPD_CSYNC 0x11
#define HINT_OPD_C 0x22
#define HINT_OPD_J 0x24
#define HINT_OPD_JC 0x26
#define HINT_OPD_NULL 0x00
/* Diagnosis related declaration and interface.  */

/* Operand error kind enumerators.

   AARCH64_OPDE_RECOVERABLE
     Less severe error found during the parsing, very possibly because
     GAS has picked up a wrong instruction template for the parsing.

   AARCH64_OPDE_A_SHOULD_FOLLOW_B
     The instruction forms (or is expected to form) part of a sequence,
     but the preceding instruction in the sequence wasn't the expected one.
     The message refers to two strings: the name of the current instruction,
     followed by the name of the expected preceding instruction.

   AARCH64_OPDE_EXPECTED_A_AFTER_B
     Same as AARCH64_OPDE_A_SHOULD_FOLLOW_B, but shifting the focus
     so that the current instruction is assumed to be the incorrect one:
     "since the previous instruction was B, the current one should be A".

   AARCH64_OPDE_SYNTAX_ERROR
     General syntax error; it can be either a user error, or simply because
     GAS is trying a wrong instruction template.

   AARCH64_OPDE_FATAL_SYNTAX_ERROR
     Definitely a user syntax error.

   AARCH64_OPDE_INVALID_VARIANT
     No syntax error, but the operands are not a valid combination, e.g.
     FMOV D0,S0

   AARCH64_OPDE_UNTIED_IMMS
     The asm failed to use the same immediate for a destination operand
     and a tied source operand.

   AARCH64_OPDE_UNTIED_OPERAND
     The asm failed to use the same register for a destination operand
     and a tied source operand.

   AARCH64_OPDE_OUT_OF_RANGE
     Error about some immediate value out of a valid range.

   AARCH64_OPDE_UNALIGNED
     Error about some immediate value not properly aligned (i.e. not being a
     multiple of a certain value).

   AARCH64_OPDE_REG_LIST
     Error about the register list operand having an unexpected number of
     registers.

   AARCH64_OPDE_OTHER_ERROR
     Error of the highest severity and used for any severe issue that does not
     fall into any of the above categories.

   AARCH64_OPDE_RECOVERABLE, AARCH64_OPDE_SYNTAX_ERROR and
   AARCH64_OPDE_FATAL_SYNTAX_ERROR are only detected by GAS while the
   AARCH64_OPDE_INVALID_VARIANT error can only be spotted by libopcodes as
   only libopcodes has the information about the valid variants of each
   instruction.

   The enumerators have an increasing severity.  This is helpful when there are
   multiple instruction templates available for a given mnemonic name (e.g.
   FMOV); this mechanism will help choose the most suitable template from which
   the generated diagnostics can most closely describe the issues, if any.  */
enum aarch64_operand_error_kind
{
  AARCH64_OPDE_RECOVERABLE,
  AARCH64_OPDE_A_SHOULD_FOLLOW_B,
  AARCH64_OPDE_EXPECTED_A_AFTER_B,
  AARCH64_OPDE_SYNTAX_ERROR,
  AARCH64_OPDE_FATAL_SYNTAX_ERROR,
  AARCH64_OPDE_INVALID_VARIANT,
  AARCH64_OPDE_UNTIED_IMMS,
  AARCH64_OPDE_UNTIED_OPERAND,
  AARCH64_OPDE_OUT_OF_RANGE,
  AARCH64_OPDE_UNALIGNED,
  AARCH64_OPDE_REG_LIST,
  AARCH64_OPDE_OTHER_ERROR
};
/* N.B. GAS assumes that this structure works well with shallow copy.  */
struct aarch64_operand_error
  enum aarch64_operand_error_kind kind;
  /* Some data for extra information.  */
/* AArch64 sequence structure used to track instructions with F_SCAN
   dependencies for both assembler and disassembler.  */
struct aarch64_instr_sequence
{
  /* The instructions in the sequence, starting with the one that
     caused it to be opened.  */
  aarch64_inst *instr;
  /* The number of instructions already in the sequence.  */
  int num_added_insns;
  /* The number of instructions allocated to the sequence.  */
  int num_allocated_insns;
};
/* Encoding entrypoint.  */
extern bool
aarch64_opcode_encode (const aarch64_opcode *, const aarch64_inst *,
                       aarch64_insn *, aarch64_opnd_qualifier_t *,
                       aarch64_operand_error *, aarch64_instr_sequence *);
extern const aarch64_opcode *
aarch64_replace_opcode (struct aarch64_inst *,
                        const aarch64_opcode *);

/* Given the opcode enumerator OP, return the pointer to the corresponding
   opcode entry.  */
extern const aarch64_opcode *
aarch64_get_opcode (enum aarch64_op);
/* Generate the string representation of an operand.  */
aarch64_print_operand (char *, size_t, bfd_vma, const aarch64_opcode *,
                       const aarch64_opnd_info *, int, int *, bfd_vma *,
                       aarch64_feature_set features);
/* Miscellaneous interface.  */

aarch64_operand_index (const enum aarch64_opnd *, enum aarch64_opnd);

extern aarch64_opnd_qualifier_t
aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *, int,
                                const aarch64_opnd_qualifier_t, int);

aarch64_is_destructive_by_operands (const aarch64_opcode *);

aarch64_num_of_operands (const aarch64_opcode *);

aarch64_stack_pointer_p (const aarch64_opnd_info *);

aarch64_zero_register_p (const aarch64_opnd_info *);
extern enum err_type
aarch64_decode_insn (aarch64_insn, aarch64_inst *, bool,
                     aarch64_operand_error *);
init_insn_sequence (const struct aarch64_inst *, aarch64_instr_sequence *);
/* Given an operand qualifier, return the expected data element size
   of a qualified operand.  */
extern unsigned char
aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t);

extern enum aarch64_operand_class
aarch64_get_operand_class (enum aarch64_opnd);

aarch64_get_operand_name (enum aarch64_opnd);

aarch64_get_operand_desc (enum aarch64_opnd);

aarch64_sve_dupm_mov_immediate_p (uint64_t, int);
#ifdef DEBUG_AARCH64
extern int debug_dump;

extern void
aarch64_verbose (const char *, ...) __attribute__ ((format (printf, 1, 2)));

#define DEBUG_TRACE(M, ...)						\
  {									\
    if (debug_dump)							\
      aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);		\
  }

#define DEBUG_TRACE_IF(C, M, ...)					\
  {									\
    if (debug_dump && (C))						\
      aarch64_verbose ("%s: " M ".", __func__, ##__VA_ARGS__);		\
  }
#else  /* !DEBUG_AARCH64 */
#define DEBUG_TRACE(M, ...) ;
#define DEBUG_TRACE_IF(C, M, ...) ;
#endif /* DEBUG_AARCH64 */
extern const char *const aarch64_sve_pattern_array[32];
extern const char *const aarch64_sve_prfop_array[16];

#endif /* OPCODE_AARCH64_H */