1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
64 insert_all_fields_after (const aarch64_operand
*self
, unsigned int start
,
65 aarch64_insn
*code
, aarch64_insn value
)
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > start
; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
83 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
86 return insert_all_fields_after (self
, 0, code
, value
);
89 /* Operand inserters. */
93 aarch64_ins_none (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
94 const aarch64_opnd_info
*info ATTRIBUTE_UNUSED
,
95 aarch64_insn
*code ATTRIBUTE_UNUSED
,
96 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
97 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
102 /* Insert register number. */
104 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
106 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
107 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
109 int val
= info
->reg
.regno
- get_operand_specific_data (self
);
110 insert_field (self
->fields
[0], code
, val
, 0);
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
118 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
119 aarch64_insn
*code
, const aarch64_inst
*inst
,
120 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
123 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
124 /* index and/or type */
125 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
127 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
128 if (info
->type
== AARCH64_OPND_En
129 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
131 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
132 assert (info
->idx
== 1); /* Vn */
133 aarch64_insn value
= info
->reglane
.index
<< pos
;
134 insert_field (FLD_imm4_11
, code
, value
, 0);
138 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
145 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
146 insert_field (FLD_imm5
, code
, value
, 0);
149 else if (inst
->opcode
->iclass
== dotproduct
)
151 unsigned reglane_index
= info
->reglane
.index
;
152 switch (info
->qualifier
)
154 case AARCH64_OPND_QLF_S_4B
:
155 case AARCH64_OPND_QLF_S_2H
:
157 assert (reglane_index
< 4);
158 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
164 else if (inst
->opcode
->iclass
== cryptosm3
)
166 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
167 unsigned reglane_index
= info
->reglane
.index
;
168 assert (reglane_index
< 4);
169 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
173 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
174 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
175 unsigned reglane_index
= info
->reglane
.index
;
177 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
178 /* Complex operand takes two elements. */
181 switch (info
->qualifier
)
183 case AARCH64_OPND_QLF_S_H
:
185 assert (reglane_index
< 8);
186 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
188 case AARCH64_OPND_QLF_S_S
:
190 assert (reglane_index
< 4);
191 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
193 case AARCH64_OPND_QLF_S_D
:
195 assert (reglane_index
< 2);
196 insert_field (FLD_H
, code
, reglane_index
, 0);
205 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
207 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
209 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
210 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
213 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
215 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
219 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
220 in AdvSIMD load/store instructions. */
222 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
223 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
224 const aarch64_inst
*inst
,
225 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
227 aarch64_insn value
= 0;
228 /* Number of elements in each structure to be loaded/stored. */
229 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
232 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
237 switch (info
->reglist
.num_regs
)
239 case 1: value
= 0x7; break;
240 case 2: value
= 0xa; break;
241 case 3: value
= 0x6; break;
242 case 4: value
= 0x2; break;
243 default: return false;
247 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
258 insert_field (FLD_opcode
, code
, value
, 0);
263 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
264 single structure to all lanes instructions. */
266 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
267 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
268 const aarch64_inst
*inst
,
269 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
272 /* The opcode dependent area stores the number of elements in
273 each structure to be loaded/stored. */
274 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
277 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
279 value
= (aarch64_insn
) 0;
280 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
281 /* OP_LD1R does not have alternating variant, but have "two consecutive"
283 value
= (aarch64_insn
) 1;
284 insert_field (FLD_S
, code
, value
, 0);
289 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
290 operand e.g. Vt in AdvSIMD load/store single element instructions. */
292 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
293 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
294 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
295 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
297 aarch64_field field
= {0, 0};
298 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
299 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
301 assert (info
->reglist
.has_index
);
304 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
305 /* Encode the index, opcode<2:1> and size. */
306 switch (info
->qualifier
)
308 case AARCH64_OPND_QLF_S_B
:
309 /* Index encoded in "Q:S:size". */
310 QSsize
= info
->reglist
.index
;
313 case AARCH64_OPND_QLF_S_H
:
314 /* Index encoded in "Q:S:size<1>". */
315 QSsize
= info
->reglist
.index
<< 1;
318 case AARCH64_OPND_QLF_S_S
:
319 /* Index encoded in "Q:S". */
320 QSsize
= info
->reglist
.index
<< 2;
323 case AARCH64_OPND_QLF_S_D
:
324 /* Index encoded in "Q". */
325 QSsize
= info
->reglist
.index
<< 3 | 0x1;
331 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
332 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
333 insert_field_2 (&field
, code
, opcodeh2
, 0);
338 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
339 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
340 or SSHR <V><d>, <V><n>, #<shift>. */
342 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
343 const aarch64_opnd_info
*info
,
344 aarch64_insn
*code
, const aarch64_inst
*inst
,
345 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
347 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
350 if (inst
->opcode
->iclass
== asimdshf
)
354 0000 x SEE AdvSIMD modified immediate
363 Q
= (val
& 0x1) ? 1 : 0;
364 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
368 assert (info
->type
== AARCH64_OPND_IMM_VLSR
369 || info
->type
== AARCH64_OPND_IMM_VLSL
);
371 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
374 0000 SEE AdvSIMD modified immediate
375 0001 (16-UInt(immh:immb))
376 001x (32-UInt(immh:immb))
377 01xx (64-UInt(immh:immb))
378 1xxx (128-UInt(immh:immb)) */
379 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
383 0000 SEE AdvSIMD modified immediate
384 0001 (UInt(immh:immb)-8)
385 001x (UInt(immh:immb)-16)
386 01xx (UInt(immh:immb)-32)
387 1xxx (UInt(immh:immb)-64) */
388 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
389 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
394 /* Insert fields for e.g. the immediate operands in
395 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
397 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
399 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
400 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
404 imm
= info
->imm
.value
;
405 if (operand_need_shift_by_two (self
))
407 if (operand_need_shift_by_three (self
))
409 if (operand_need_shift_by_four (self
))
411 insert_all_fields (self
, code
, imm
);
415 /* Insert immediate and its shift amount for e.g. the last operand in
416 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
418 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
419 aarch64_insn
*code
, const aarch64_inst
*inst
,
420 aarch64_operand_error
*errors
)
423 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
425 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
429 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
430 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
432 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
433 const aarch64_opnd_info
*info
,
435 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
436 aarch64_operand_error
*errors
439 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
440 uint64_t imm
= info
->imm
.value
;
441 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
442 int amount
= info
->shifter
.amount
;
443 aarch64_field field
= {0, 0};
445 /* a:b:c:d:e:f:g:h */
446 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
448 /* Either MOVI <Dd>, #<imm>
449 or MOVI <Vd>.2D, #<imm>.
450 <imm> is a 64-bit immediate
451 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
452 encoded in "a:b:c:d:e:f:g:h". */
453 imm
= aarch64_shrink_expanded_imm8 (imm
);
454 assert ((int)imm
>= 0);
456 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
458 if (kind
== AARCH64_MOD_NONE
)
461 /* shift amount partially in cmode */
462 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
463 if (kind
== AARCH64_MOD_LSL
)
465 /* AARCH64_MOD_LSL: shift zeros. */
466 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
467 assert (esize
== 4 || esize
== 2 || esize
== 1);
468 /* For 8-bit move immediate, the optional LSL #0 does not require
474 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
476 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
480 /* AARCH64_MOD_MSL: shift ones. */
482 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
484 insert_field_2 (&field
, code
, amount
, 0);
489 /* Insert fields for an 8-bit floating-point immediate. */
491 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
493 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
494 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
496 insert_all_fields (self
, code
, info
->imm
.value
);
500 /* Insert 1-bit rotation immediate (#90 or #270). */
502 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
503 const aarch64_opnd_info
*info
,
504 aarch64_insn
*code
, const aarch64_inst
*inst
,
505 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
507 uint64_t rot
= (info
->imm
.value
- 90) / 180;
509 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
513 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
515 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
516 const aarch64_opnd_info
*info
,
517 aarch64_insn
*code
, const aarch64_inst
*inst
,
518 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
520 uint64_t rot
= info
->imm
.value
/ 90;
522 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
526 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
527 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
529 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
531 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
532 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
534 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
538 /* Insert arithmetic immediate for e.g. the last operand in
539 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
541 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
542 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
543 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
546 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
547 insert_field (self
->fields
[0], code
, value
, 0);
548 /* imm12 (unsigned) */
549 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
553 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
554 the operand should be inverted before encoding. */
556 aarch64_ins_limm_1 (const aarch64_operand
*self
,
557 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
558 const aarch64_inst
*inst
, bool invert_p
,
559 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
563 uint64_t imm
= info
->imm
.value
;
564 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
568 /* The constraint check should guarantee that this will work. */
569 res
= aarch64_logical_immediate_p (imm
, esize
, &value
);
571 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
576 /* Insert logical/bitmask immediate for e.g. the last operand in
577 ORR <Wd|WSP>, <Wn>, #<imm>. */
579 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
580 aarch64_insn
*code
, const aarch64_inst
*inst
,
581 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
583 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
584 inst
->opcode
->op
== OP_BIC
, errors
);
587 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
589 aarch64_ins_inv_limm (const aarch64_operand
*self
,
590 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
591 const aarch64_inst
*inst
,
592 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
594 return aarch64_ins_limm_1 (self
, info
, code
, inst
, true, errors
);
597 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
598 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
600 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
601 aarch64_insn
*code
, const aarch64_inst
*inst
,
602 aarch64_operand_error
*errors
)
604 aarch64_insn value
= 0;
606 assert (info
->idx
== 0);
609 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
610 if (inst
->opcode
->iclass
== ldstpair_indexed
611 || inst
->opcode
->iclass
== ldstnapair_offs
612 || inst
->opcode
->iclass
== ldstpair_off
613 || inst
->opcode
->iclass
== loadlit
)
616 switch (info
->qualifier
)
618 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
619 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
620 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
621 default: return false;
623 insert_field (FLD_ldst_size
, code
, value
, 0);
628 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
629 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
635 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
637 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
638 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
639 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
640 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
643 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
647 /* Encode the address operand for e.g.
648 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
650 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
651 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
652 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
653 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
656 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
659 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
661 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
663 if (kind
== AARCH64_MOD_LSL
)
664 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
665 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
667 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
668 S
= info
->shifter
.amount
!= 0;
670 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
674 Must be #0 if <extend> is explicitly LSL. */
675 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
676 insert_field (FLD_S
, code
, S
, 0);
681 /* Encode the address operand for e.g.
682 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
684 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
685 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
686 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
687 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
690 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
693 int imm
= info
->addr
.offset
.imm
;
694 insert_field (self
->fields
[1], code
, imm
, 0);
697 if (info
->addr
.writeback
)
699 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
700 insert_field (self
->fields
[2], code
, 1, 0);
705 /* Encode the address operand for e.g.
706 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
708 aarch64_ins_rcpc3_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
709 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
710 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
711 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
714 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
717 int imm
= info
->addr
.offset
.imm
;
718 insert_field (self
->fields
[1], code
, imm
, 0);
723 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
725 aarch64_ins_addr_simm (const aarch64_operand
*self
,
726 const aarch64_opnd_info
*info
,
728 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
729 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
734 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
735 /* simm (imm9 or imm7) */
736 imm
= info
->addr
.offset
.imm
;
737 if (self
->fields
[0] == FLD_imm7
738 || info
->qualifier
== AARCH64_OPND_QLF_imm_tag
)
739 /* scaled immediate in ld/st pair instructions.. */
740 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
741 insert_field (self
->fields
[0], code
, imm
, 0);
742 /* pre/post- index */
743 if (info
->addr
.writeback
)
745 assert (inst
->opcode
->iclass
!= ldst_unscaled
746 && inst
->opcode
->iclass
!= ldstnapair_offs
747 && inst
->opcode
->iclass
!= ldstpair_off
748 && inst
->opcode
->iclass
!= ldst_unpriv
);
749 assert (info
->addr
.preind
!= info
->addr
.postind
);
750 if (info
->addr
.preind
)
751 insert_field (self
->fields
[1], code
, 1, 0);
757 /* Encode the address operand, potentially offset by the load/store ammount,
758 e.g. LDIAPP <Xt>, <Xt2> [<Xn|SP>, #<simm>]
759 and STILP <Xt>, <Xt2> [<Xn|SP>], #<simm>.*/
761 aarch64_ins_rcpc3_addr_opt_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
762 const aarch64_opnd_info
*info
,
764 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
765 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
770 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
772 imm
= info
->addr
.offset
.imm
;
774 insert_field (FLD_opc2
, code
, 1, 0);
779 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
781 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
782 const aarch64_opnd_info
*info
,
784 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
785 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
790 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
792 imm
= info
->addr
.offset
.imm
>> 3;
793 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
794 insert_field (self
->fields
[2], code
, imm
, 0);
796 if (info
->addr
.writeback
)
798 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
799 insert_field (self
->fields
[3], code
, 1, 0);
804 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
806 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
807 const aarch64_opnd_info
*info
,
809 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
810 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
812 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
815 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
817 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
821 /* Encode the address operand for e.g.
822 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
824 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
825 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
826 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
827 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
830 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
832 if (info
->addr
.offset
.is_reg
)
833 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
835 insert_field (FLD_Rm
, code
, 0x1f, 0);
839 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
841 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
842 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
843 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
844 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
847 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
851 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
853 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
854 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
855 const aarch64_inst
*inst
,
856 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
858 /* If a system instruction check if we have any restrictions on which
859 registers it can use. */
860 if (inst
->opcode
->iclass
== ic_system
)
862 uint64_t opcode_flags
863 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
864 uint32_t sysreg_flags
865 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
867 /* Check to see if it's read-only, else check if it's write only.
868 if it's both or unspecified don't care. */
869 if (opcode_flags
== F_SYS_READ
871 && sysreg_flags
!= F_REG_READ
)
873 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
874 detail
->error
= _("specified register cannot be read from");
875 detail
->index
= info
->idx
;
876 detail
->non_fatal
= true;
878 else if (opcode_flags
== F_SYS_WRITE
880 && sysreg_flags
!= F_REG_WRITE
)
882 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
883 detail
->error
= _("specified register cannot be written to");
884 detail
->index
= info
->idx
;
885 detail
->non_fatal
= true;
888 /* op0:op1:CRn:CRm:op2 */
889 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
890 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
894 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
896 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
897 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
898 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
899 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
902 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
905 /* Extra CRm mask. */
906 if (info
->sysreg
.flags
| F_REG_IN_CRM
)
907 insert_field (FLD_CRm
, code
, PSTATE_DECODE_CRM (info
->sysreg
.flags
), 0);
911 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
913 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
914 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
915 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
916 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
918 /* op1:CRn:CRm:op2 */
919 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
920 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
924 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
927 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
928 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
929 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
930 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
933 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
937 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
940 aarch64_ins_barrier_dsb_nxs (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
941 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
942 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
943 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
945 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
946 encoded in CRm<3:2>. */
947 aarch64_insn value
= (info
->barrier
->value
>> 2) - 4;
948 insert_field (FLD_CRm_dsb_nxs
, code
, value
, 0);
952 /* Encode the prefetch operation option operand for e.g.
953 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
956 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
957 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
958 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
959 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
962 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
966 /* Encode the hint number for instructions that alias HINT but take an
970 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
971 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
972 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
973 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
976 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
980 /* Encode the extended register operand for e.g.
981 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
983 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
984 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
985 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
986 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
988 enum aarch64_modifier_kind kind
;
991 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
993 kind
= info
->shifter
.kind
;
994 if (kind
== AARCH64_MOD_LSL
)
995 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
996 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
997 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
999 insert_field (FLD_imm3_10
, code
, info
->shifter
.amount
, 0);
1004 /* Encode the shifted register operand for e.g.
1005 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1007 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1008 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1009 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1010 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1013 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
1015 insert_field (FLD_shift
, code
,
1016 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
1018 insert_field (FLD_imm6_10
, code
, info
->shifter
.amount
, 0);
1023 /* Encode the LSL-shifted register operand for e.g.
1024 ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}. */
1026 aarch64_ins_reg_lsl_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1027 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1028 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1029 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1032 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
1034 insert_field (FLD_imm3_10
, code
, info
->shifter
.amount
, 0);
1038 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1039 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1040 SELF's operand-dependent value. fields[0] specifies the field that
1041 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1043 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
1044 const aarch64_opnd_info
*info
,
1046 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1047 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1049 int factor
= 1 + get_operand_specific_data (self
);
1050 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1051 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1055 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1056 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1057 SELF's operand-dependent value. fields[0] specifies the field that
1058 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1060 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
1061 const aarch64_opnd_info
*info
,
1063 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1064 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1066 int factor
= 1 + get_operand_specific_data (self
);
1067 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1068 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1072 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1073 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1074 SELF's operand-dependent value. fields[0] specifies the field that
1075 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1076 and imm3 fields, with imm3 being the less-significant part. */
1078 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
1079 const aarch64_opnd_info
*info
,
1081 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1082 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1084 int factor
= 1 + get_operand_specific_data (self
);
1085 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1086 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
1087 2, FLD_imm3_10
, FLD_SVE_imm6
);
/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Here the factor is a power of two (1 << shift), unlike the
     MUL VL variants above which use 1 + data.  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}
1121 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1122 is SELF's operand-dependent value. fields[0] specifies the base
1123 register field and fields[1] specifies the offset register field. */
1125 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1126 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1127 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1128 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1130 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1131 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
/* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
   <shift> is SELF's operand-dependent value.  fields[0] specifies the
   base register field, fields[1] specifies the offset register field and
   fields[2] is a single-bit field that selects SXTW over UXTW.  */
bool
aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* fields[2]: 0 selects UXTW, 1 selects SXTW.  */
  if (info->shifter.kind == AARCH64_MOD_UXTW)
    insert_field (self->fields[2], code, 0, 0);
  else
    insert_field (self->fields[2], code, 1, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "ext" prefix this is an *encoder* helper,
   shared by the _lsl/_sxtw/_uxtw inserters below; the name appears to
   be historical — confirm against the dis-side counterpart before
   renaming.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  /* The modifier's scaling amount goes in the SVE_msz field.  */
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* The LSL modifier shares the generic Z+Z encoding.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* SXTW is implied by the opcode; only the shared Z+Z fields differ.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* UXTW is implied by the opcode; only the shared Z+Z fields differ.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
1222 /* Encode an SVE ADD/SUB immediate. */
1224 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1225 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1226 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1227 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1229 if (info
->shifter
.amount
== 8)
1230 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1231 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1232 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1234 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
/* Encode a register list whose first register must be a multiple of the
   list length; the field stores first_regno divided by that length.
   SELF's operand-dependent value gives the number of registers.  */
bool
aarch64_ins_sve_aligned_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  unsigned int val = info->reglist.first_regno;
  /* Alignment (val % num_regs == 0) is assumed to have been validated
     earlier, so the division below is exact.  */
  insert_field (self->fields[0], code, val / num_regs, 0);
  return true;
}
/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* CPY/DUP immediates use the same shifted-byte encoding as ADD/SUB.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* (index * 2 + 1) * esize produces the triangular bit pattern: the
     element size fixes the position of the low set bit and the index
     occupies the bits above it.  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, FLD_imm5, FLD_SVE_tszh);
  return true;
}
/* Encode Zn.<T>[<imm>], where <imm> is an immediate with range of 0 to one less
   than the number of elements in 128 bit, which can encode il:tsz.  */
bool
aarch64_ins_sve_index_imm (const aarch64_operand *self,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  /* Same triangular encoding as aarch64_ins_sve_index, but the two
     destination fields are taken from SELF rather than fixed.  */
  insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
		 2, self->fields[1], self->fields[2]);
  return true;
}
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* Delegates to the generic logical-immediate inserter.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bool
aarch64_ins_sve_quad_index (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  /* The register number must fit below the index bits.  */
  assert (info->reglane.regno < (1U << reg_bits));
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return true;
}
1319 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1322 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1323 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1324 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1325 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1327 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
/* Encode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ins_sve_strided_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  /* MASK covers the representable register numbers: bit 4 plus the
     low bits allowed by the stride.  Only used by the assert below.  */
  unsigned int mask ATTRIBUTE_UNUSED = 16 | (16 / num_regs - 1);
  unsigned int val = info->reglist.first_regno;
  assert ((val & mask) == val);
  insert_field (self->fields[0], code, val >> 4, 0);
  insert_field (self->fields[1], code, val & 15, 0);
  return true;
}
/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  /* MUL amount is biased by one so that the default (MUL #1)
     encodes as zero.  */
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}
/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The element size comes from the qualifier of the preceding
     (register) operand, so this operand cannot be first.  */
  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* Left shifts are encoded as (element size in bits) + shift.  */
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}
/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The operand-specific data says how many operands back to look
     for the register operand that carries the element size.  */
  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int) opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* Right shifts are encoded as (2 * element size in bits) - shift.  */
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
1400 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1401 The fields array specifies which field to use. */
1403 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1404 const aarch64_opnd_info
*info
,
1406 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1407 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1409 if (info
->imm
.value
== 0x3f000000)
1410 insert_field (self
->fields
[0], code
, 0, 0);
1412 insert_field (self
->fields
[0], code
, 1, 0);
1416 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1417 The fields array specifies which field to use. */
1419 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1420 const aarch64_opnd_info
*info
,
1422 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1423 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1425 if (info
->imm
.value
== 0x3f000000)
1426 insert_field (self
->fields
[0], code
, 0, 0);
1428 insert_field (self
->fields
[0], code
, 1, 0);
1432 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1433 The fields array specifies which field to use. */
1435 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1436 const aarch64_opnd_info
*info
,
1438 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1439 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1441 if (info
->imm
.value
== 0)
1442 insert_field (self
->fields
[0], code
, 0, 0);
1444 insert_field (self
->fields
[0], code
, 1, 0);
/* Encode an SME ZA vector-range operand (single-vector-group form):
   vertical/horizontal selector, index base register, ZA tile number and
   scaled immediate.  Field usage depends on the element-size qualifier.  */
bool
aarch64_ins_sme_za_vrs1 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Index registers are W12-W15; only the low two bits are encoded.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte elements: no tile bits, only the scaled offset.  */
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
    case AARCH64_OPND_QLF_S_S:
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword elements: the tile number alone identifies the slice.  */
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      break;
    }

  return true;
}
/* Encode an SME ZA vector-range operand (two-vector-group form); like
   aarch64_ins_sme_za_vrs1 but the qualifier-to-field mapping shifts by
   one element size (S_S joins S_D here rather than S_H).  */
bool
aarch64_ins_sme_za_vrs2 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Index registers are W12-W15; only the low two bits are encoded.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_S:
    case AARCH64_OPND_QLF_S_D:
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      break;
    }

  return true;
}
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;
  /* Index registers are W12-W15, encoded relative to W12.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  /* The tile number and the slice immediate share one field; the split
     point depends on the element size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Q elements: the whole field is the tile number.  */
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
/* Like aarch64_ins_sme_za_hv_tiles, but for a range of RANGE_SIZE
   consecutive tile slices; the tile number and scaled offset are packed
   into one field.  */
bool
aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand *self,
				   const aarch64_opnd_info *info,
				   aarch64_insn *code,
				   const aarch64_inst *inst ATTRIBUTE_UNUSED,
				   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int ebytes = aarch64_get_qualifier_esize (info->qualifier);
  int range_size = get_opcode_dependent_value (inst->opcode);
  int fld_v = info->indexed_za.v;
  /* Index registers are W12-W15, encoded relative to W12.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int max_value = 16 / range_size / ebytes;

  if (max_value == 0)
    max_value = 1;

  /* The immediate must be an exact multiple of the range size and its
     scaled value must fit below the tile-number bits.  */
  assert (imm % range_size == 0 && (imm / range_size) < max_value);
  int fld_zan_imm = (info->indexed_za.regno * max_value) | (imm / range_size);
  assert (fld_zan_imm < (range_size == 4 && ebytes < 8 ? 4 : 8));

  insert_field (self->fields[0], code, fld_v, 0);
  insert_field (self->fields[1], code, fld_rv, 0);
  insert_field (self->fields[2], code, fld_zan_imm, 0);

  return true;
}
1602 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1603 separated by commas, encoded in the "imm8" field.
1605 For programmer convenience an assembler must also accept the names of
1606 32-bit, 16-bit and 8-bit element tiles which are converted into the
1607 corresponding set of 64-bit element tiles.
1610 aarch64_ins_sme_za_list (const aarch64_operand
*self
,
1611 const aarch64_opnd_info
*info
,
1613 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1614 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1616 int fld_mask
= info
->imm
.value
;
1617 insert_field (self
->fields
[0], code
, fld_mask
, 0);
/* Encode an indexed ZA array operand: select register (low two bits of
   W8-W11 style base) plus an offset immediate scaled by the vector-group
   count.  */
bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int countm1 = info->indexed_za.index.countm1;
  /* The offset must be a multiple of the group size, so the encoded
     quotient is exact.  */
  assert (imm % (countm1 + 1) == 0);
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm / (countm1 + 1), 0);
  return true;
}
1638 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand
*self
,
1639 const aarch64_opnd_info
*info
,
1641 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1642 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1644 int regno
= info
->addr
.base_regno
;
1645 int imm
= info
->addr
.offset
.imm
;
1646 insert_field (self
->fields
[0], code
, regno
, 0);
1647 insert_field (self
->fields
[1], code
, imm
, 0);
/* Encode in SMSTART and SMSTOP {SM | ZA } mode.  */
bool
aarch64_ins_sme_sm_za (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_crm;
  /* Set CRm[3:1] bits.  The parser stores the mode as the character
     's' (streaming SVE mode) or 'z' (ZA storage) in reg.regno.  */
  if (info->reg.regno == 's')
    fld_crm = 0x02 ; /* SVCRSM.  */
  else if (info->reg.regno == 'z')
    fld_crm = 0x04; /* SVCRZA.  */
  else
    return false;

  insert_field (self->fields[0], code, fld_crm, 0);
  return true;
}
/* Encode source scalable predicate register (Pn), name of the index base
   register W12-W15 (Rm), and optional element index, defaulting to 0, in the
   range 0 to one less than the number of vector elements in a 128-bit vector
   register, encoded in "i1:tszh:tszl".  */
bool
aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
				     const aarch64_opnd_info *info,
				     aarch64_insn *code,
				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_pn = info->indexed_za.regno;
  /* Index registers are W12-W15, encoded relative to W12.  */
  int fld_rm = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int fld_i1, fld_tszh, fld_tshl;

  insert_field (self->fields[0], code, fld_rm, 0);
  insert_field (self->fields[1], code, fld_pn, 0);

  /* Optional element index, defaulting to 0, in the range 0 to one less than
     the number of vector elements in a 128-bit vector register, encoded in
     "i1:tszh:tszl".  The tszl low bits also carry the element-size marker
     (the position of the lowest set bit), so the index is shifted up and
     OR-ed with that marker.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* <imm> is 4 bit value.  */
      fld_i1 = (imm >> 3) & 0x1;
      fld_tszh = (imm >> 2) & 0x1;
      fld_tshl = ((imm << 1) | 0x1) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* <imm> is 3 bit value.  */
      fld_i1 = (imm >> 2) & 0x1;
      fld_tszh = (imm >> 1) & 0x1;
      fld_tshl = ((imm << 2) | 0x2) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* <imm> is 2 bit value.  */
      fld_i1 = (imm >> 1) & 0x1;
      fld_tszh = imm & 0x1;
      fld_tshl = 0x4;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* <imm> is 1 bit value.  */
      fld_i1 = imm & 0x1;
      fld_tszh = 0x0;
      fld_tshl = 0x0;
      break;
    default:
      return false;
    }

  insert_field (self->fields[2], code, fld_i1, 0);
  insert_field (self->fields[3], code, fld_tszh, 0);
  insert_field (self->fields[4], code, fld_tshl, 0);
  return true;
}
1739 /* Insert X0-X30. Register 31 is unallocated. */
1741 aarch64_ins_x0_to_x30 (const aarch64_operand
*self
,
1742 const aarch64_opnd_info
*info
,
1744 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1745 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1747 assert (info
->reg
.regno
<= 30);
1748 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
/* Insert an indexed register, with the first field being the register
   number and the remaining fields being the index.  */
bool
aarch64_ins_simple_index (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The operand-specific data gives the first encodable register
     number, so the field stores regno - bias.  */
  int bias = get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->reglane.regno - bias, 0);
  insert_all_fields_after (self, 1, code, info->reglane.index);
  return true;
}
1767 /* Insert a plain shift-right immediate, when there is only a single
1770 aarch64_ins_plain_shrimm (const aarch64_operand
*self
,
1771 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1772 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1773 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1775 unsigned int base
= 1 << get_operand_field_width (self
, 0);
1776 insert_field (self
->fields
[0], code
, base
- info
->imm
.value
, 0);
/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  /* Pick the operand whose qualifier distinguishes 4S from 2D:
     the source for FCVTN, the destination for FCVTL.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  /* FCVTXN only narrows double to single, so size[0] is always 1.  */
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}
/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  /* 'opc' occupies bits 15-16.  */
  const aarch64_field field = {15, 2};

  /* The destination qualifier selects the target precision.  */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);
}
/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */
int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  /* Find the first qualifier sequence that matches every operand.  */
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      if (i == nops)
	return variant;
    }
  /* Unreachable when the precondition (valid qualifiers) holds.  */
  abort ();
}
/* Do miscellaneous encodings that are not common enough to be driven by
   operand flags.  Dispatches on the opcode's OP enumerator; opcodes not
   listed need no extra work.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default:
      break;
    }
}
/* Encode the 'size' and 'Q' field for e.g. SHADD.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q: the least-significant bit of the standard qualifier value.  */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the remaining bits; load/store iclasses keep them in a
     different field (vldst_size) from everything else (size).  */
  if (inst->opcode->iclass == asisdlse
      || inst->opcode->iclass == asisdlsep
      || inst->opcode->iclass == asisdlso
      || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
  if (inst->opcode->flags & F_SF)
    {
      /* sf bit: 1 for 64-bit (X/SP) operands, 0 for 32-bit.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      /* LSE atomics encode the W/X choice in their own sz field.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_RCPC3_SIZE)
    {
      /* RCpc3 loads/stores derive their size field from the first
	 operand's qualifier.  */
      switch (inst->operands[0].qualifier)
	{
	case AARCH64_OPND_QLF_W: value = 2; break;
	case AARCH64_OPND_QLF_X: value = 3; break;
	case AARCH64_OPND_QLF_S_B: value = 0; break;
	case AARCH64_OPND_QLF_S_H: value = 1; break;
	case AARCH64_OPND_QLF_S_S: value = 2; break;
	case AARCH64_OPND_QLF_S_D: value = 3; break;
	case AARCH64_OPND_QLF_S_Q: value = 0; break;
	default: return;
	}
      insert_field (FLD_rcpc3_size, &inst->value, value, 0);
    }

  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: return;
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      /* Scalar element size, taken from a scalar operand's qualifier.  */
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      /* Vector arrangement encoded in Q and a one-hot sub-field of imm5.  */
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }

  if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
    {
      /* The two size-bearing operands must agree: the vector operand's
	 standard value is twice the scalar's.  */
      enum aarch64_opnd_qualifier qualifier[2];
      aarch64_insn value1 = 0;
      idx = 0;
      qualifier[0] = inst->operands[idx].qualifier;
      qualifier[1] = inst->operands[idx + 2].qualifier;
      value = aarch64_get_qualifier_standard_value (qualifier[0]);
      value1 = aarch64_get_qualifier_standard_value (qualifier[1]);
      assert ((value >> 1) == value1);
      insert_field (FLD_size, &inst->value, value1, inst->opcode->mask);
    }

  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
/* Encode the chosen qualifier variant of INST into the instruction word,
   dispatching on the opcode's instruction class (iclass).  Each case below
   writes aarch64_get_variant (inst) -- possibly offset or remapped -- into
   the field(s) that the iclass uses to represent the qualifiers_list index.
   NOTE(review): this chunk is a garbled extraction; several `case' labels,
   `break's, `default' and braces are missing below -- only comments were
   added here; verify against the pristine file.  */
2120 /* Some instructions (including all SVE ones) use the instruction class
2121 to describe how a qualifiers_list index is represented in the instruction
2122 encoding. If INST is such an instruction, encode the chosen qualifier
2126 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
2129 switch (inst
->opcode
->iclass
)
2133 /* The variant is encoded as part of the immediate. */
/* SME: variant goes straight into the size field at bit 12.  */
2136 case sme_size_12_bhs
:
2137 insert_field (FLD_SME_size_12
, &inst
->value
,
2138 aarch64_get_variant (inst
), 0);
/* NOTE(review): the case label for this insert of FLD_SME_size_22 was
   dropped by the extraction.  */
2142 insert_field (FLD_SME_size_22
, &inst
->value
,
2143 aarch64_get_variant (inst
), 0);
/* These two classes skip the B variant, hence the +1 bias.  */
2146 case sme_size_22_hsd
:
2147 insert_field (FLD_SME_size_22
, &inst
->value
,
2148 aarch64_get_variant (inst
) + 1, 0);
2151 case sme_size_12_hs
:
2152 insert_field (FLD_SME_size_12
, &inst
->value
,
2153 aarch64_get_variant (inst
) + 1, 0);
2157 insert_field (FLD_SME_sz_23
, &inst
->value
,
2158 aarch64_get_variant (inst
), 0);
/* Variant split across two fields: M bit at 14 plus the size field.  */
2162 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
2163 0, 2, FLD_SVE_M_14
, FLD_size
);
2169 case sve_shift_pred
:
2170 case sve_shift_unpred
:
2171 case sve_shift_tsz_hsd
:
2172 case sve_shift_tsz_bhsd
:
2173 /* For indices and shift amounts, the variant is encoded as
2174 part of the immediate. */
2179 /* For sve_limm, the .B, .H, and .S forms are just a convenience
2180 and depend on the immediate. They don't have a separate
2187 /* These instructions have only a single variant. */
2191 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
2192 0, 2, FLD_SVE_M_16
, FLD_size
);
2196 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
2201 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
2205 /* MOD 3 For `OP_SVE_Vv_HSD`. */
2206 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) % 3 + 1, 0);
2213 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
2217 insert_field (FLD_SVE_sz2
, &inst
->value
, aarch64_get_variant (inst
), 0);
2221 insert_field (FLD_SVE_size
, &inst
->value
,
2222 aarch64_get_variant (inst
) + 1, 0);
/* Variant encoded one-hot: bit (variant) of tszl:sz selects B/H/S.  */
2225 case sve_size_tsz_bhs
:
2226 insert_fields (&inst
->value
,
2227 (1 << aarch64_get_variant (inst
)),
2228 0, 2, FLD_SVE_tszl_19
, FLD_SVE_sz
);
2232 variant
= aarch64_get_variant (inst
) + 1;
2235 insert_field (FLD_size
, &inst
->value
, variant
, 0);
2243 /* Converters converting an alias opcode instruction to its real form. */
2245 /* ROR <Wd>, <Ws>, #<shift>
2247 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2249 convert_ror_to_extr (aarch64_inst
*inst
)
2251 copy_operand_info (inst
, 3, 2);
2252 copy_operand_info (inst
, 2, 1);
2255 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2257 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2259 convert_xtl_to_shll (aarch64_inst
*inst
)
2261 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
2262 inst
->operands
[2].imm
.value
= 0;
2266 LSR <Xd>, <Xn>, #<shift>
2268 UBFM <Xd>, <Xn>, #<shift>, #63. */
2270 convert_sr_to_bfm (aarch64_inst
*inst
)
2272 inst
->operands
[3].imm
.value
=
2273 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
2276 /* Convert MOV to ORR. */
2278 convert_mov_to_orr (aarch64_inst
*inst
)
2280 /* MOV <Vd>.<T>, <Vn>.<T>
2282 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2283 copy_operand_info (inst
, 2, 1);
2286 /* When <imms> >= <immr>, the instruction written:
2287 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2289 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2292 convert_bfx_to_bfm (aarch64_inst
*inst
)
2296 /* Convert the operand. */
2297 lsb
= inst
->operands
[2].imm
.value
;
2298 width
= inst
->operands
[3].imm
.value
;
2299 inst
->operands
[2].imm
.value
= lsb
;
2300 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
2303 /* When <imms> < <immr>, the instruction written:
2304 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2306 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2309 convert_bfi_to_bfm (aarch64_inst
*inst
)
2313 /* Convert the operand. */
2314 lsb
= inst
->operands
[2].imm
.value
;
2315 width
= inst
->operands
[3].imm
.value
;
2316 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2318 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2319 inst
->operands
[3].imm
.value
= width
- 1;
2323 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2324 inst
->operands
[3].imm
.value
= width
- 1;
2328 /* The instruction written:
2329 BFC <Xd>, #<lsb>, #<width>
2331 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2334 convert_bfc_to_bfm (aarch64_inst
*inst
)
2339 copy_operand_info (inst
, 3, 2);
2340 copy_operand_info (inst
, 2, 1);
2341 copy_operand_info (inst
, 1, 0);
2342 inst
->operands
[1].reg
.regno
= 0x1f;
2344 /* Convert the immediate operand. */
2345 lsb
= inst
->operands
[2].imm
.value
;
2346 width
= inst
->operands
[3].imm
.value
;
2347 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2349 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2350 inst
->operands
[3].imm
.value
= width
- 1;
2354 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2355 inst
->operands
[3].imm
.value
= width
- 1;
2359 /* The instruction written:
2360 LSL <Xd>, <Xn>, #<shift>
2362 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2365 convert_lsl_to_ubfm (aarch64_inst
*inst
)
2367 int64_t shift
= inst
->operands
[2].imm
.value
;
2369 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2371 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
2372 inst
->operands
[3].imm
.value
= 31 - shift
;
2376 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
2377 inst
->operands
[3].imm
.value
= 63 - shift
;
2381 /* CINC <Wd>, <Wn>, <cond>
2383 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2386 convert_to_csel (aarch64_inst
*inst
)
2388 copy_operand_info (inst
, 3, 2);
2389 copy_operand_info (inst
, 2, 1);
2390 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2393 /* CSET <Wd>, <cond>
2395 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2398 convert_cset_to_csinc (aarch64_inst
*inst
)
2400 copy_operand_info (inst
, 3, 1);
2401 copy_operand_info (inst
, 2, 0);
2402 copy_operand_info (inst
, 1, 0);
2403 inst
->operands
[1].reg
.regno
= 0x1f;
2404 inst
->operands
[2].reg
.regno
= 0x1f;
2405 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
/* Convert the alias MOV <Wd>, #<imm> into its real wide-move form
   MOVZ/MOVN <Wd>, #<imm16>, LSL #<shift>.
   NOTE(review): this chunk is a garbled extraction -- the function
   header, braces, `break's, the switch's default action and the
   statement following the "constraint check" comment are missing;
   only comments were added here; verify against the pristine file.  */
2410 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2413 convert_mov_to_movewide (aarch64_inst
*inst
)
/* VALUE is initialized to all-ones purely as a safe default; both
   switch arms below overwrite it.  */
2416 uint32_t shift_amount
;
2417 uint64_t value
= ~(uint64_t)0;
/* WIDE keeps the immediate as-is; WIDEN stores its bitwise complement
   (the moved 16-bit chunk then encodes the negated form).  */
2419 switch (inst
->opcode
->op
)
2421 case OP_MOV_IMM_WIDE
:
2422 value
= inst
->operands
[1].imm
.value
;
2424 case OP_MOV_IMM_WIDEN
:
2425 value
= ~inst
->operands
[1].imm
.value
;
/* Re-type operand 1 as HALF: a 16-bit immediate plus an LSL shift.  */
2430 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
2431 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
/* aarch64_wide_constant_p finds which 16-bit aligned chunk holds the
   payload and returns its shift; earlier constraint matching should
   guarantee success here.  */
2432 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
2433 /* The constraint check should have guaranteed this wouldn't happen. */
/* Keep only the surviving 16-bit chunk and record its LSL amount.  */
2435 value
>>= shift_amount
;
2437 inst
->operands
[1].imm
.value
= value
;
2438 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
2439 inst
->operands
[1].shifter
.amount
= shift_amount
;
2444 ORR <Wd>, WZR, #<imm>. */
2447 convert_mov_to_movebitmask (aarch64_inst
*inst
)
2449 copy_operand_info (inst
, 2, 1);
2450 inst
->operands
[1].reg
.regno
= 0x1f;
2451 inst
->operands
[1].skip
= 0;
/* Dispatch table from an alias opcode to the converter that rewrites
   INST's operands into the form of the REAL opcode, then replace the
   opcode itself.  Aliases without the F_CONV flag need no operand
   conversion and jump straight to the opcode replacement.
   NOTE(review): this chunk is a garbled extraction -- the function
   header, braces, nearly all `case OP_*' labels and `break's of the
   dispatch switch are missing; only comments were added here; verify
   against the pristine file.  */
2454 /* Some alias opcodes are assembled by being converted to their real-form. */
2457 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
2459 const aarch64_opcode
*alias
= inst
->opcode
;
/* No F_CONV: the alias encodes identically, skip operand rewriting.  */
2461 if ((alias
->flags
& F_CONV
) == 0)
2462 goto convert_to_real_return
;
/* Each converter below reshapes the operand list for one alias
   family; the selecting case labels were dropped by the extraction.  */
2468 convert_sr_to_bfm (inst
);
2471 convert_lsl_to_ubfm (inst
);
2476 convert_to_csel (inst
);
2480 convert_cset_to_csinc (inst
);
2485 convert_bfx_to_bfm (inst
);
2490 convert_bfi_to_bfm (inst
);
2493 convert_bfc_to_bfm (inst
);
2496 convert_mov_to_orr (inst
);
2498 case OP_MOV_IMM_WIDE
:
2499 case OP_MOV_IMM_WIDEN
:
2500 convert_mov_to_movewide (inst
);
2502 case OP_MOV_IMM_LOG
:
2503 convert_mov_to_movebitmask (inst
);
2506 convert_ror_to_extr (inst
);
2512 convert_xtl_to_shll (inst
);
/* Finally swap the alias opcode for the real one in INST.  */
2518 convert_to_real_return
:
2519 aarch64_replace_opcode (inst
, real
);
2522 /* Encode *INST_ORI of the opcode code OPCODE.
2523 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2524 matched operand qualifier sequence in *QLF_SEQ. */
2527 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
2528 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
2529 aarch64_opnd_qualifier_t
*qlf_seq
,
2530 aarch64_operand_error
*mismatch_detail
,
2531 aarch64_instr_sequence
* insn_sequence
)
2534 const aarch64_opcode
*aliased
;
2535 aarch64_inst copy
, *inst
;
2537 DEBUG_TRACE ("enter with %s", opcode
->name
);
2539 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2543 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
2544 if (inst
->opcode
== NULL
)
2545 inst
->opcode
= opcode
;
2547 /* Constrain the operands.
2548 After passing this, the encoding is guaranteed to succeed. */
2549 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
2551 DEBUG_TRACE ("FAIL since operand constraint not met");
2555 /* Get the base value.
2556 Note: this has to be before the aliasing handling below in order to
2557 get the base value from the alias opcode before we move on to the
2558 aliased opcode for encoding. */
2559 inst
->value
= opcode
->opcode
;
2561 /* No need to do anything else if the opcode does not have any operand. */
2562 if (aarch64_num_of_operands (opcode
) == 0)
2565 /* Assign operand indexes and check types. Also put the matched
2566 operand qualifiers in *QLF_SEQ to return. */
2567 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2569 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
2570 inst
->operands
[i
].idx
= i
;
2571 if (qlf_seq
!= NULL
)
2572 *qlf_seq
= inst
->operands
[i
].qualifier
;
2575 aliased
= aarch64_find_real_opcode (opcode
);
2576 /* If the opcode is an alias and it does not ask for direct encoding by
2577 itself, the instruction will be transformed to the form of real opcode
2578 and the encoding will be carried out using the rules for the aliased
2580 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
2582 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2583 aliased
->name
, opcode
->name
);
2584 /* Convert the operands to the form of the real opcode. */
2585 convert_to_real (inst
, aliased
);
2589 aarch64_opnd_info
*info
= inst
->operands
;
2591 /* Call the inserter of each operand. */
2592 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
2594 const aarch64_operand
*opnd
;
2595 enum aarch64_opnd type
= opcode
->operands
[i
];
2596 if (type
== AARCH64_OPND_NIL
)
2600 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2603 opnd
= &aarch64_operands
[type
];
2604 if (operand_has_inserter (opnd
)
2605 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
2610 /* Call opcode encoders indicated by flags. */
2611 if (opcode_has_special_coder (opcode
))
2612 do_special_encoding (inst
);
2614 /* Possibly use the instruction class to encode the chosen qualifier
2616 aarch64_encode_variant_using_iclass (inst
);
2618 /* Run a verifier if the instruction has one set. */
2619 if (opcode
->verifier
)
2621 enum err_type result
= opcode
->verifier (inst
, *code
, 0, true,
2622 mismatch_detail
, insn_sequence
);
2634 /* Always run constrain verifiers, this is needed because constrains need to
2635 maintain a global state. Regardless if the instruction has the flag set
2637 enum err_type result
= verify_constraints (inst
, *code
, 0, true,
2638 mismatch_detail
, insn_sequence
);
2651 DEBUG_TRACE ("exit with %s", opcode
->name
);
2653 *code
= inst
->value
;