1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
64 insert_all_fields_after (const aarch64_operand
*self
, unsigned int start
,
65 aarch64_insn
*code
, aarch64_insn value
)
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > start
; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
83 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
86 return insert_all_fields_after (self
, 0, code
, value
);
89 /* Operand inserters. */
93 aarch64_ins_none (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
94 const aarch64_opnd_info
*info ATTRIBUTE_UNUSED
,
95 aarch64_insn
*code ATTRIBUTE_UNUSED
,
96 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
97 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
102 /* Insert register number. */
104 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
106 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
107 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
109 int val
= info
->reg
.regno
- get_operand_specific_data (self
);
110 insert_field (self
->fields
[0], code
, val
, 0);
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
118 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
119 aarch64_insn
*code
, const aarch64_inst
*inst
,
120 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
123 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
124 /* index and/or type */
125 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
127 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
128 if (info
->type
== AARCH64_OPND_En
129 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
131 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
132 assert (info
->idx
== 1); /* Vn */
133 aarch64_insn value
= info
->reglane
.index
<< pos
;
134 insert_field (FLD_imm4_11
, code
, value
, 0);
138 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
145 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
146 insert_field (FLD_imm5
, code
, value
, 0);
149 else if (inst
->opcode
->iclass
== dotproduct
)
151 unsigned reglane_index
= info
->reglane
.index
;
152 switch (info
->qualifier
)
154 case AARCH64_OPND_QLF_S_4B
:
155 case AARCH64_OPND_QLF_S_2H
:
157 assert (reglane_index
< 4);
158 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
160 case AARCH64_OPND_QLF_S_2B
:
162 assert (reglane_index
< 8);
163 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
169 else if (inst
->opcode
->iclass
== cryptosm3
)
171 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
172 unsigned reglane_index
= info
->reglane
.index
;
173 assert (reglane_index
< 4);
174 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
178 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
179 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
180 unsigned reglane_index
= info
->reglane
.index
;
182 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
183 /* Complex operand takes two elements. */
186 switch (info
->qualifier
)
188 case AARCH64_OPND_QLF_S_B
:
190 assert (reglane_index
< 16);
191 insert_fields (code
, reglane_index
, 0, 2, FLD_imm3_19
, FLD_H
);
193 case AARCH64_OPND_QLF_S_H
:
195 assert (reglane_index
< 8);
196 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
198 case AARCH64_OPND_QLF_S_S
:
200 assert (reglane_index
< 4);
201 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
203 case AARCH64_OPND_QLF_S_D
:
205 assert (reglane_index
< 2);
206 insert_field (FLD_H
, code
, reglane_index
, 0);
215 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
217 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
219 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
220 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
223 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
225 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
229 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
230 in AdvSIMD load/store instructions. */
232 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
233 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
234 const aarch64_inst
*inst
,
235 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
237 aarch64_insn value
= 0;
238 /* Number of elements in each structure to be loaded/stored. */
239 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
242 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
247 switch (info
->reglist
.num_regs
)
249 case 1: value
= 0x7; break;
250 case 2: value
= 0xa; break;
251 case 3: value
= 0x6; break;
252 case 4: value
= 0x2; break;
253 default: return false;
257 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
268 insert_field (FLD_opcode
, code
, value
, 0);
273 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
274 single structure to all lanes instructions. */
276 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
277 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
278 const aarch64_inst
*inst
,
279 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
282 /* The opcode dependent area stores the number of elements in
283 each structure to be loaded/stored. */
284 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
287 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
289 value
= (aarch64_insn
) 0;
290 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
291 /* OP_LD1R does not have alternating variant, but have "two consecutive"
293 value
= (aarch64_insn
) 1;
294 insert_field (FLD_S
, code
, value
, 0);
299 /* Insert regnos of register list operand for AdvSIMD lut instructions. */
301 aarch64_ins_lut_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
303 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
304 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
306 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
310 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
311 operand e.g. Vt in AdvSIMD load/store single element instructions. */
313 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
314 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
315 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
316 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
318 aarch64_field field
= {0, 0};
319 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
320 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
322 assert (info
->reglist
.has_index
);
325 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
326 /* Encode the index, opcode<2:1> and size. */
327 switch (info
->qualifier
)
329 case AARCH64_OPND_QLF_S_B
:
330 /* Index encoded in "Q:S:size". */
331 QSsize
= info
->reglist
.index
;
334 case AARCH64_OPND_QLF_S_H
:
335 /* Index encoded in "Q:S:size<1>". */
336 QSsize
= info
->reglist
.index
<< 1;
339 case AARCH64_OPND_QLF_S_S
:
340 /* Index encoded in "Q:S". */
341 QSsize
= info
->reglist
.index
<< 2;
344 case AARCH64_OPND_QLF_S_D
:
345 /* Index encoded in "Q". */
346 QSsize
= info
->reglist
.index
<< 3 | 0x1;
352 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
353 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
354 insert_field_2 (&field
, code
, opcodeh2
, 0);
359 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
360 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
361 or SSHR <V><d>, <V><n>, #<shift>. */
363 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
364 const aarch64_opnd_info
*info
,
365 aarch64_insn
*code
, const aarch64_inst
*inst
,
366 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
368 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
371 if (inst
->opcode
->iclass
== asimdshf
)
375 0000 x SEE AdvSIMD modified immediate
384 Q
= (val
& 0x1) ? 1 : 0;
385 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
389 assert (info
->type
== AARCH64_OPND_IMM_VLSR
390 || info
->type
== AARCH64_OPND_IMM_VLSL
);
392 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
395 0000 SEE AdvSIMD modified immediate
396 0001 (16-UInt(immh:immb))
397 001x (32-UInt(immh:immb))
398 01xx (64-UInt(immh:immb))
399 1xxx (128-UInt(immh:immb)) */
400 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
404 0000 SEE AdvSIMD modified immediate
405 0001 (UInt(immh:immb)-8)
406 001x (UInt(immh:immb)-16)
407 01xx (UInt(immh:immb)-32)
408 1xxx (UInt(immh:immb)-64) */
409 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
410 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
415 /* Insert fields for e.g. the immediate operands in
416 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
418 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
420 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
421 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
425 imm
= info
->imm
.value
;
426 if (operand_need_shift_by_two (self
))
428 if (operand_need_shift_by_three (self
))
430 if (operand_need_shift_by_four (self
))
432 insert_all_fields (self
, code
, imm
);
436 /* Insert immediate and its shift amount for e.g. the last operand in
437 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
439 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
440 aarch64_insn
*code
, const aarch64_inst
*inst
,
441 aarch64_operand_error
*errors
)
444 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
446 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
450 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
451 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
453 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
454 const aarch64_opnd_info
*info
,
456 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
457 aarch64_operand_error
*errors
460 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
461 uint64_t imm
= info
->imm
.value
;
462 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
463 int amount
= info
->shifter
.amount
;
464 aarch64_field field
= {0, 0};
466 /* a:b:c:d:e:f:g:h */
467 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
469 /* Either MOVI <Dd>, #<imm>
470 or MOVI <Vd>.2D, #<imm>.
471 <imm> is a 64-bit immediate
472 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
473 encoded in "a:b:c:d:e:f:g:h". */
474 imm
= aarch64_shrink_expanded_imm8 (imm
);
475 assert ((int)imm
>= 0);
477 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
479 if (kind
== AARCH64_MOD_NONE
)
482 /* shift amount partially in cmode */
483 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
484 if (kind
== AARCH64_MOD_LSL
)
486 /* AARCH64_MOD_LSL: shift zeros. */
487 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
488 assert (esize
== 4 || esize
== 2 || esize
== 1);
489 /* For 8-bit move immediate, the optional LSL #0 does not require
495 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
497 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
501 /* AARCH64_MOD_MSL: shift ones. */
503 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
505 insert_field_2 (&field
, code
, amount
, 0);
510 /* Insert fields for an 8-bit floating-point immediate. */
512 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
514 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
515 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
517 insert_all_fields (self
, code
, info
->imm
.value
);
521 /* Insert 1-bit rotation immediate (#90 or #270). */
523 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
524 const aarch64_opnd_info
*info
,
525 aarch64_insn
*code
, const aarch64_inst
*inst
,
526 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
528 uint64_t rot
= (info
->imm
.value
- 90) / 180;
530 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
534 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
536 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
537 const aarch64_opnd_info
*info
,
538 aarch64_insn
*code
, const aarch64_inst
*inst
,
539 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
541 uint64_t rot
= info
->imm
.value
/ 90;
543 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
547 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
548 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
550 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
552 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
553 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
555 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
559 /* Insert arithmetic immediate for e.g. the last operand in
560 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
562 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
563 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
564 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
567 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
568 insert_field (self
->fields
[0], code
, value
, 0);
569 /* imm12 (unsigned) */
570 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
574 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
575 the operand should be inverted before encoding. */
577 aarch64_ins_limm_1 (const aarch64_operand
*self
,
578 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
579 const aarch64_inst
*inst
, bool invert_p
,
580 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
584 uint64_t imm
= info
->imm
.value
;
585 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
589 /* The constraint check should guarantee that this will work. */
590 res
= aarch64_logical_immediate_p (imm
, esize
, &value
);
592 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
597 /* Insert logical/bitmask immediate for e.g. the last operand in
598 ORR <Wd|WSP>, <Wn>, #<imm>. */
600 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
601 aarch64_insn
*code
, const aarch64_inst
*inst
,
602 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
604 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
605 inst
->opcode
->op
== OP_BIC
, errors
);
608 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
610 aarch64_ins_inv_limm (const aarch64_operand
*self
,
611 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
612 const aarch64_inst
*inst
,
613 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
615 return aarch64_ins_limm_1 (self
, info
, code
, inst
, true, errors
);
618 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
619 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
621 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
622 aarch64_insn
*code
, const aarch64_inst
*inst
,
623 aarch64_operand_error
*errors
)
625 aarch64_insn value
= 0;
627 assert (info
->idx
== 0);
630 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
631 if (inst
->opcode
->iclass
== ldstpair_indexed
632 || inst
->opcode
->iclass
== ldstnapair_offs
633 || inst
->opcode
->iclass
== ldstpair_off
634 || inst
->opcode
->iclass
== loadlit
)
637 switch (info
->qualifier
)
639 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
640 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
641 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
642 default: return false;
644 insert_field (FLD_ldst_size
, code
, value
, 0);
649 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
650 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
656 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
658 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
659 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
660 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
661 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
664 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
668 /* Encode the address operand for e.g.
669 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
671 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
672 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
673 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
674 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
677 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
680 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
682 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
684 if (kind
== AARCH64_MOD_LSL
)
685 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
686 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
688 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
689 S
= info
->shifter
.amount
!= 0;
691 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
695 Must be #0 if <extend> is explicitly LSL. */
696 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
697 insert_field (FLD_S
, code
, S
, 0);
702 /* Encode the address operand for e.g.
703 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
705 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
706 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
707 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
708 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
711 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
714 int imm
= info
->addr
.offset
.imm
;
715 insert_field (self
->fields
[1], code
, imm
, 0);
718 if (info
->addr
.writeback
)
720 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
721 insert_field (self
->fields
[2], code
, 1, 0);
726 /* Encode the address operand for e.g.
727 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
729 aarch64_ins_rcpc3_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
730 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
731 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
732 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
735 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
738 int imm
= info
->addr
.offset
.imm
;
739 insert_field (self
->fields
[1], code
, imm
, 0);
744 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
746 aarch64_ins_addr_simm (const aarch64_operand
*self
,
747 const aarch64_opnd_info
*info
,
749 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
750 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
755 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
756 /* simm (imm9 or imm7) */
757 imm
= info
->addr
.offset
.imm
;
758 if (self
->fields
[0] == FLD_imm7
759 || info
->qualifier
== AARCH64_OPND_QLF_imm_tag
)
760 /* scaled immediate in ld/st pair instructions.. */
761 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
762 insert_field (self
->fields
[0], code
, imm
, 0);
763 /* pre/post- index */
764 if (info
->addr
.writeback
)
766 assert (inst
->opcode
->iclass
!= ldst_unscaled
767 && inst
->opcode
->iclass
!= ldstnapair_offs
768 && inst
->opcode
->iclass
!= ldstpair_off
769 && inst
->opcode
->iclass
!= ldst_unpriv
);
770 assert (info
->addr
.preind
!= info
->addr
.postind
);
771 if (info
->addr
.preind
)
772 insert_field (self
->fields
[1], code
, 1, 0);
778 /* Encode the address operand, potentially offset by the load/store ammount,
779 e.g. LDIAPP <Xt>, <Xt2> [<Xn|SP>, #<simm>]
780 and STILP <Xt>, <Xt2> [<Xn|SP>], #<simm>.*/
782 aarch64_ins_rcpc3_addr_opt_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
783 const aarch64_opnd_info
*info
,
785 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
786 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
791 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
793 imm
= info
->addr
.offset
.imm
;
795 insert_field (FLD_opc2
, code
, 1, 0);
800 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
802 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
803 const aarch64_opnd_info
*info
,
805 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
806 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
811 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
813 imm
= info
->addr
.offset
.imm
>> 3;
814 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
815 insert_field (self
->fields
[2], code
, imm
, 0);
817 if (info
->addr
.writeback
)
819 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
820 insert_field (self
->fields
[3], code
, 1, 0);
825 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
827 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
828 const aarch64_opnd_info
*info
,
830 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
831 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
833 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
836 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
838 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
842 /* Encode the address operand for e.g.
843 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
845 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
846 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
847 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
848 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
851 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
853 if (info
->addr
.offset
.is_reg
)
854 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
856 insert_field (FLD_Rm
, code
, 0x1f, 0);
860 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
862 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
863 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
864 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
865 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
868 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
872 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
874 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
875 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
876 const aarch64_inst
*inst
,
877 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
879 /* If a system instruction check if we have any restrictions on which
880 registers it can use. */
881 if (inst
->opcode
->iclass
== ic_system
)
883 uint64_t opcode_flags
884 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
885 uint32_t sysreg_flags
886 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
888 /* Check to see if it's read-only, else check if it's write only.
889 if it's both or unspecified don't care. */
890 if (opcode_flags
== F_SYS_READ
892 && sysreg_flags
!= F_REG_READ
)
894 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
895 detail
->error
= _("specified register cannot be read from");
896 detail
->index
= info
->idx
;
897 detail
->non_fatal
= true;
899 else if (opcode_flags
== F_SYS_WRITE
901 && sysreg_flags
!= F_REG_WRITE
)
903 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
904 detail
->error
= _("specified register cannot be written to");
905 detail
->index
= info
->idx
;
906 detail
->non_fatal
= true;
909 /* op0:op1:CRn:CRm:op2 */
910 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
911 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
915 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
917 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
918 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
919 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
920 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
923 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
926 /* Extra CRm mask. */
927 if (info
->sysreg
.flags
| F_REG_IN_CRM
)
928 insert_field (FLD_CRm
, code
, PSTATE_DECODE_CRM (info
->sysreg
.flags
), 0);
932 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
934 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
935 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
936 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
937 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
939 /* op1:CRn:CRm:op2 */
940 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
941 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
945 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
948 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
949 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
950 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
951 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
954 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
958 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
961 aarch64_ins_barrier_dsb_nxs (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
962 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
963 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
964 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
966 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
967 encoded in CRm<3:2>. */
968 aarch64_insn value
= (info
->barrier
->value
>> 2) - 4;
969 insert_field (FLD_CRm_dsb_nxs
, code
, value
, 0);
973 /* Encode the prefetch operation option operand for e.g.
974 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
977 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
978 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
979 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
980 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
983 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
987 /* Encode the hint number for instructions that alias HINT but take an
991 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
992 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
993 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
994 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
997 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
1001 /* Encode the extended register operand for e.g.
1002 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1004 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1005 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1006 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1007 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1009 enum aarch64_modifier_kind kind
;
1012 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
1014 kind
= info
->shifter
.kind
;
1015 if (kind
== AARCH64_MOD_LSL
)
1016 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
1017 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
1018 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
1020 insert_field (FLD_imm3_10
, code
, info
->shifter
.amount
, 0);
1025 /* Encode the shifted register operand for e.g.
1026 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
1028 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1029 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1030 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1031 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1034 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
1036 insert_field (FLD_shift
, code
,
1037 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
1039 insert_field (FLD_imm6_10
, code
, info
->shifter
.amount
, 0);
1044 /* Encode the LSL-shifted register operand for e.g.
1045 ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}. */
1047 aarch64_ins_reg_lsl_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
1048 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1049 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1050 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1053 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
1055 insert_field (FLD_imm3_10
, code
, info
->shifter
.amount
, 0);
1059 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1060 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1061 SELF's operand-dependent value. fields[0] specifies the field that
1062 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
1064 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
1065 const aarch64_opnd_info
*info
,
1067 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1068 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1070 int factor
= 1 + get_operand_specific_data (self
);
1071 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1072 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1076 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1077 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1078 SELF's operand-dependent value. fields[0] specifies the field that
1079 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
1081 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
1082 const aarch64_opnd_info
*info
,
1084 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1085 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1087 int factor
= 1 + get_operand_specific_data (self
);
1088 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1089 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1093 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1094 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1095 SELF's operand-dependent value. fields[0] specifies the field that
1096 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1097 and imm3 fields, with imm3 being the less-significant part. */
1099 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
1100 const aarch64_opnd_info
*info
,
1102 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1103 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1105 int factor
= 1 + get_operand_specific_data (self
);
1106 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1107 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
1108 2, FLD_imm3_10
, FLD_SVE_imm6
);
1112 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1113 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1114 value. fields[0] specifies the base register field. */
1116 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
1117 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1118 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1119 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1121 int factor
= 1 << get_operand_specific_data (self
);
1122 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1123 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1127 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1128 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1129 value. fields[0] specifies the base register field. */
1131 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
1132 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1133 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1134 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1136 int factor
= 1 << get_operand_specific_data (self
);
1137 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1138 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1142 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1143 is SELF's operand-dependent value. fields[0] specifies the base
1144 register field and fields[1] specifies the offset register field. */
1146 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1147 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1148 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1149 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1151 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1152 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1156 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1157 <shift> is SELF's operand-dependent value. fields[0] specifies the
1158 base register field, fields[1] specifies the offset register field and
1159 fields[2] is a single-bit field that selects SXTW over UXTW. */
1161 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1162 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1163 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1164 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1166 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1167 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1168 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1169 insert_field (self
->fields
[2], code
, 0, 0);
1171 insert_field (self
->fields
[2], code
, 1, 0);
1175 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1176 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1177 fields[0] specifies the base register field. */
1179 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1180 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1181 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1182 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1184 int factor
= 1 << get_operand_specific_data (self
);
1185 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1186 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1190 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1191 where <modifier> is fixed by the instruction and where <msz> is a
1192 2-bit unsigned number. fields[0] specifies the base register field
1193 and fields[1] specifies the offset register field. */
1195 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1196 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1197 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1199 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1200 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1201 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1205 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1206 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1207 field and fields[1] specifies the offset register field. */
1209 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1210 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1211 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1212 aarch64_operand_error
*errors
)
1214 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1217 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1218 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1219 field and fields[1] specifies the offset register field. */
1221 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1222 const aarch64_opnd_info
*info
,
1224 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1225 aarch64_operand_error
*errors
)
1227 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1230 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1231 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1232 field and fields[1] specifies the offset register field. */
1234 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1235 const aarch64_opnd_info
*info
,
1237 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1238 aarch64_operand_error
*errors
)
1240 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1243 /* Encode an SVE ADD/SUB immediate. */
1245 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1246 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1247 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1248 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1250 if (info
->shifter
.amount
== 8)
1251 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1252 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1253 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1255 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1260 aarch64_ins_sve_aligned_reglist (const aarch64_operand
*self
,
1261 const aarch64_opnd_info
*info
,
1263 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1264 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1266 unsigned int num_regs
= get_operand_specific_data (self
);
1267 unsigned int val
= info
->reglist
.first_regno
;
1268 insert_field (self
->fields
[0], code
, val
/ num_regs
, 0);
1272 /* Encode an SVE CPY/DUP immediate. */
1274 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1275 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1276 const aarch64_inst
*inst
,
1277 aarch64_operand_error
*errors
)
1279 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1282 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1283 array specifies which field to use for Zn. MM is encoded in the
1284 concatenation of imm5 and SVE_tszh, with imm5 being the less
1285 significant part. */
1287 aarch64_ins_sve_index (const aarch64_operand
*self
,
1288 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1289 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1290 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1292 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1293 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1294 insert_all_fields_after (self
, 1, code
,
1295 (info
->reglane
.index
* 2 + 1) * esize
);
1299 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1301 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1302 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1303 const aarch64_inst
*inst
,
1304 aarch64_operand_error
*errors
)
1306 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1309 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1310 and where MM occupies the most-significant part. The operand-dependent
1311 value specifies the number of bits in Zn. */
1313 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1314 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1315 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1316 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1318 unsigned int reg_bits
= get_operand_specific_data (self
);
1319 assert (info
->reglane
.regno
< (1U << reg_bits
));
1320 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1321 insert_all_fields (self
, code
, val
);
1325 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1328 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1329 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1330 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1331 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1333 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1337 /* Encode a strided register list. The first field holds the top bit
1338 (0 or 16) and the second field holds the lower bits. The stride is
1339 16 divided by the list length. */
1341 aarch64_ins_sve_strided_reglist (const aarch64_operand
*self
,
1342 const aarch64_opnd_info
*info
,
1344 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1345 aarch64_operand_error
*errors
1348 unsigned int num_regs
= get_operand_specific_data (self
);
1349 unsigned int mask ATTRIBUTE_UNUSED
= 16 | (16 / num_regs
- 1);
1350 unsigned int val
= info
->reglist
.first_regno
;
1351 assert ((val
& mask
) == val
);
1352 insert_field (self
->fields
[0], code
, val
>> 4, 0);
1353 insert_field (self
->fields
[1], code
, val
& 15, 0);
1357 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1358 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1361 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1362 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1363 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1364 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1366 insert_all_fields (self
, code
, info
->imm
.value
);
1367 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1371 /* Encode an SVE shift left immediate. */
1373 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1374 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1375 const aarch64_inst
*inst
,
1376 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1378 const aarch64_opnd_info
*prev_operand
;
1381 assert (info
->idx
> 0);
1382 prev_operand
= &inst
->operands
[info
->idx
- 1];
1383 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1384 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1388 /* Encode an SVE shift right immediate. */
1390 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1391 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1392 const aarch64_inst
*inst
,
1393 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1395 const aarch64_opnd_info
*prev_operand
;
1398 unsigned int opnd_backshift
= get_operand_specific_data (self
);
1399 assert (info
->idx
>= (int)opnd_backshift
);
1400 prev_operand
= &inst
->operands
[info
->idx
- opnd_backshift
];
1401 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1402 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1406 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1407 The fields array specifies which field to use. */
1409 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1410 const aarch64_opnd_info
*info
,
1412 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1413 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1415 if (info
->imm
.value
== 0x3f000000)
1416 insert_field (self
->fields
[0], code
, 0, 0);
1418 insert_field (self
->fields
[0], code
, 1, 0);
1422 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1423 The fields array specifies which field to use. */
1425 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1426 const aarch64_opnd_info
*info
,
1428 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1429 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1431 if (info
->imm
.value
== 0x3f000000)
1432 insert_field (self
->fields
[0], code
, 0, 0);
1434 insert_field (self
->fields
[0], code
, 1, 0);
1438 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1439 The fields array specifies which field to use. */
1441 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1442 const aarch64_opnd_info
*info
,
1444 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1445 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1447 if (info
->imm
.value
== 0)
1448 insert_field (self
->fields
[0], code
, 0, 0);
1450 insert_field (self
->fields
[0], code
, 1, 0);
1455 aarch64_ins_sme_za_vrs1 (const aarch64_operand
*self
,
1456 const aarch64_opnd_info
*info
,
1458 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1459 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1461 int za_reg
= info
->indexed_za
.regno
;
1462 int regno
= info
->indexed_za
.index
.regno
& 3;
1463 int imm
= info
->indexed_za
.index
.imm
;
1464 int v
= info
->indexed_za
.v
;
1465 int countm1
= info
->indexed_za
.index
.countm1
;
1467 insert_field (self
->fields
[0], code
, v
, 0);
1468 insert_field (self
->fields
[1], code
, regno
, 0);
1469 switch (info
->qualifier
)
1471 case AARCH64_OPND_QLF_S_B
:
1472 insert_field (self
->fields
[2], code
, imm
/ (countm1
+ 1), 0);
1474 case AARCH64_OPND_QLF_S_H
:
1475 case AARCH64_OPND_QLF_S_S
:
1476 insert_field (self
->fields
[2], code
, za_reg
, 0);
1477 insert_field (self
->fields
[3], code
, imm
/ (countm1
+ 1), 0);
1479 case AARCH64_OPND_QLF_S_D
:
1480 insert_field (self
->fields
[2], code
, za_reg
, 0);
1490 aarch64_ins_sme_za_vrs2 (const aarch64_operand
*self
,
1491 const aarch64_opnd_info
*info
,
1493 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1494 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1496 int za_reg
= info
->indexed_za
.regno
;
1497 int regno
= info
->indexed_za
.index
.regno
& 3;
1498 int imm
= info
->indexed_za
.index
.imm
;
1499 int v
= info
->indexed_za
.v
;
1500 int countm1
= info
->indexed_za
.index
.countm1
;
1502 insert_field (self
->fields
[0], code
, v
, 0);
1503 insert_field (self
->fields
[1], code
, regno
, 0);
1504 switch (info
->qualifier
)
1506 case AARCH64_OPND_QLF_S_B
:
1507 insert_field (self
->fields
[2], code
, imm
/ (countm1
+ 1), 0);
1509 case AARCH64_OPND_QLF_S_H
:
1510 insert_field (self
->fields
[2], code
, za_reg
, 0);
1511 insert_field (self
->fields
[3], code
, imm
/ (countm1
+ 1), 0);
1513 case AARCH64_OPND_QLF_S_S
:
1514 case AARCH64_OPND_QLF_S_D
:
1515 insert_field (self
->fields
[2], code
, za_reg
, 0);
1524 /* Encode in SME instruction such as MOVZA ZA tile slice to vector. */
1526 aarch64_ins_sme_za_tile_to_vec (const aarch64_operand
*self
,
1527 const aarch64_opnd_info
*info
,
1529 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1530 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1532 int fld_v
= info
->indexed_za
.v
;
1533 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1534 int fld_zan_imm
= info
->indexed_za
.index
.imm
;
1535 int regno
= info
->indexed_za
.regno
;
1537 switch (info
->qualifier
)
1539 case AARCH64_OPND_QLF_S_B
:
1540 insert_field (FLD_imm4_5
, code
, fld_zan_imm
, 0);
1542 case AARCH64_OPND_QLF_S_H
:
1543 insert_field (FLD_ZA8_1
, code
, regno
, 0);
1544 insert_field (FLD_imm3_5
, code
, fld_zan_imm
, 0);
1546 case AARCH64_OPND_QLF_S_S
:
1547 insert_field (FLD_ZA7_2
, code
, regno
, 0);
1548 insert_field (FLD_off2
, code
, fld_zan_imm
, 0);
1550 case AARCH64_OPND_QLF_S_D
:
1551 insert_field (FLD_ZA6_3
, code
, regno
, 0);
1552 insert_field (FLD_ol
, code
, fld_zan_imm
, 0);
1554 case AARCH64_OPND_QLF_S_Q
:
1555 insert_field (FLD_ZA5_4
, code
, regno
, 0);
1561 insert_field (self
->fields
[0], code
, fld_v
, 0);
1562 insert_field (self
->fields
[1], code
, fld_rv
, 0);
1567 /* Encode in SME instruction such as MOVA ZA tile vector register number,
1568 vector indicator, vector selector and immediate. */
1570 aarch64_ins_sme_za_hv_tiles (const aarch64_operand
*self
,
1571 const aarch64_opnd_info
*info
,
1573 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1574 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1578 int fld_v
= info
->indexed_za
.v
;
1579 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1580 int fld_zan_imm
= info
->indexed_za
.index
.imm
;
1581 int regno
= info
->indexed_za
.regno
;
1583 switch (info
->qualifier
)
1585 case AARCH64_OPND_QLF_S_B
:
1589 case AARCH64_OPND_QLF_S_H
:
1592 fld_zan_imm
|= regno
<< 3;
1594 case AARCH64_OPND_QLF_S_S
:
1597 fld_zan_imm
|= regno
<< 2;
1599 case AARCH64_OPND_QLF_S_D
:
1602 fld_zan_imm
|= regno
<< 1;
1604 case AARCH64_OPND_QLF_S_Q
:
1607 fld_zan_imm
= regno
;
1613 insert_field (self
->fields
[0], code
, fld_size
, 0);
1614 insert_field (self
->fields
[1], code
, fld_q
, 0);
1615 insert_field (self
->fields
[2], code
, fld_v
, 0);
1616 insert_field (self
->fields
[3], code
, fld_rv
, 0);
1617 insert_field (self
->fields
[4], code
, fld_zan_imm
, 0);
1623 aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand
*self
,
1624 const aarch64_opnd_info
*info
,
1626 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1627 aarch64_operand_error
*errors
1630 int ebytes
= aarch64_get_qualifier_esize (info
->qualifier
);
1631 int range_size
= get_opcode_dependent_value (inst
->opcode
);
1632 int fld_v
= info
->indexed_za
.v
;
1633 int fld_rv
= info
->indexed_za
.index
.regno
- 12;
1634 int imm
= info
->indexed_za
.index
.imm
;
1635 int max_value
= 16 / range_size
/ ebytes
;
1640 assert (imm
% range_size
== 0 && (imm
/ range_size
) < max_value
);
1641 int fld_zan_imm
= (info
->indexed_za
.regno
* max_value
) | (imm
/ range_size
);
1642 assert (fld_zan_imm
< (range_size
== 4 && ebytes
< 8 ? 4 : 8));
1644 insert_field (self
->fields
[0], code
, fld_v
, 0);
1645 insert_field (self
->fields
[1], code
, fld_rv
, 0);
1646 insert_field (self
->fields
[2], code
, fld_zan_imm
, 0);
1651 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1652 separated by commas, encoded in the "imm8" field.
1654 For programmer convenience an assembler must also accept the names of
1655 32-bit, 16-bit and 8-bit element tiles which are converted into the
1656 corresponding set of 64-bit element tiles.
1659 aarch64_ins_sme_za_list (const aarch64_operand
*self
,
1660 const aarch64_opnd_info
*info
,
1662 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1663 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1665 int fld_mask
= info
->imm
.value
;
1666 insert_field (self
->fields
[0], code
, fld_mask
, 0);
1671 aarch64_ins_sme_za_array (const aarch64_operand
*self
,
1672 const aarch64_opnd_info
*info
,
1674 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1675 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1677 int regno
= info
->indexed_za
.index
.regno
& 3;
1678 int imm
= info
->indexed_za
.index
.imm
;
1679 int countm1
= info
->indexed_za
.index
.countm1
;
1680 assert (imm
% (countm1
+ 1) == 0);
1681 insert_field (self
->fields
[0], code
, regno
, 0);
1682 insert_field (self
->fields
[1], code
, imm
/ (countm1
+ 1), 0);
1687 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand
*self
,
1688 const aarch64_opnd_info
*info
,
1690 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1691 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1693 int regno
= info
->addr
.base_regno
;
1694 int imm
= info
->addr
.offset
.imm
;
1695 insert_field (self
->fields
[0], code
, regno
, 0);
1696 insert_field (self
->fields
[1], code
, imm
, 0);
1700 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1702 aarch64_ins_sme_sm_za (const aarch64_operand
*self
,
1703 const aarch64_opnd_info
*info
,
1705 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1706 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1708 aarch64_insn fld_crm
;
1709 /* Set CRm[3:1] bits. */
1710 if (info
->reg
.regno
== 's')
1711 fld_crm
= 0x02 ; /* SVCRSM. */
1712 else if (info
->reg
.regno
== 'z')
1713 fld_crm
= 0x04; /* SVCRZA. */
1717 insert_field (self
->fields
[0], code
, fld_crm
, 0);
1721 /* Encode source scalable predicate register (Pn), name of the index base
1722 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1723 range 0 to one less than the number of vector elements in a 128-bit vector
1724 register, encoded in "i1:tszh:tszl".
1727 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand
*self
,
1728 const aarch64_opnd_info
*info
,
1730 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1731 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1733 int fld_pn
= info
->indexed_za
.regno
;
1734 int fld_rm
= info
->indexed_za
.index
.regno
- 12;
1735 int imm
= info
->indexed_za
.index
.imm
;
1736 int fld_i1
, fld_tszh
, fld_tshl
;
1738 insert_field (self
->fields
[0], code
, fld_rm
, 0);
1739 insert_field (self
->fields
[1], code
, fld_pn
, 0);
1741 /* Optional element index, defaulting to 0, in the range 0 to one less than
1742 the number of vector elements in a 128-bit vector register, encoded in
1752 switch (info
->qualifier
)
1754 case AARCH64_OPND_QLF_S_B
:
1755 /* <imm> is 4 bit value. */
1756 fld_i1
= (imm
>> 3) & 0x1;
1757 fld_tszh
= (imm
>> 2) & 0x1;
1758 fld_tshl
= ((imm
<< 1) | 0x1) & 0x7;
1760 case AARCH64_OPND_QLF_S_H
:
1761 /* <imm> is 3 bit value. */
1762 fld_i1
= (imm
>> 2) & 0x1;
1763 fld_tszh
= (imm
>> 1) & 0x1;
1764 fld_tshl
= ((imm
<< 2) | 0x2) & 0x7;
1766 case AARCH64_OPND_QLF_S_S
:
1767 /* <imm> is 2 bit value. */
1768 fld_i1
= (imm
>> 1) & 0x1;
1769 fld_tszh
= imm
& 0x1;
1772 case AARCH64_OPND_QLF_S_D
:
1773 /* <imm> is 1 bit value. */
1782 insert_field (self
->fields
[2], code
, fld_i1
, 0);
1783 insert_field (self
->fields
[3], code
, fld_tszh
, 0);
1784 insert_field (self
->fields
[4], code
, fld_tshl
, 0);
1788 /* Insert X0-X30. Register 31 is unallocated. */
1790 aarch64_ins_x0_to_x30 (const aarch64_operand
*self
,
1791 const aarch64_opnd_info
*info
,
1793 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1794 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1796 assert (info
->reg
.regno
<= 30);
1797 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
1801 /* Insert an indexed register, with the first field being the register
1802 number and the remaining fields being the index. */
1804 aarch64_ins_simple_index (const aarch64_operand
*self
,
1805 const aarch64_opnd_info
*info
,
1807 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1808 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1810 int bias
= get_operand_specific_data (self
);
1811 insert_field (self
->fields
[0], code
, info
->reglane
.regno
- bias
, 0);
1812 insert_all_fields_after (self
, 1, code
, info
->reglane
.index
);
1816 /* Insert a plain shift-right immediate, when there is only a single
1819 aarch64_ins_plain_shrimm (const aarch64_operand
*self
,
1820 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1821 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1822 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1824 unsigned int base
= 1 << get_operand_field_width (self
, 0);
1825 insert_field (self
->fields
[0], code
, base
- info
->imm
.value
, 0);
1829 /* Miscellaneous encoding functions. */
1831 /* Encode size[0], i.e. bit 22, for
1832 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1835 encode_asimd_fcvt (aarch64_inst
*inst
)
1838 aarch64_field field
= {0, 0};
1839 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_NIL
;
1841 switch (inst
->opcode
->op
)
1845 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1846 qualifier
= inst
->operands
[1].qualifier
;
1850 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1851 qualifier
= inst
->operands
[0].qualifier
;
1856 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1857 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1858 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1859 gen_sub_field (FLD_size
, 0, 1, &field
);
1860 insert_field_2 (&field
, &inst
->value
, value
, 0);
1863 /* Encode size[0], i.e. bit 22, for
1864 e.g. FCVTXN <Vb><d>, <Va><n>. */
1867 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1869 aarch64_insn val
= 1;
1870 aarch64_field field
= {0, 0};
1871 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1872 gen_sub_field (FLD_size
, 0, 1, &field
);
1873 insert_field_2 (&field
, &inst
->value
, val
, 0);
1876 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1878 encode_fcvt (aarch64_inst
*inst
)
1881 const aarch64_field field
= {15, 2};
1884 switch (inst
->operands
[0].qualifier
)
1886 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1887 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1888 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1891 insert_field_2 (&field
, &inst
->value
, val
, 0);
1896 /* Return the index in qualifiers_list that INST is using. Should only
1897 be called once the qualifiers are known to be valid. */
1900 aarch64_get_variant (struct aarch64_inst
*inst
)
1902 int i
, nops
, variant
;
1904 nops
= aarch64_num_of_operands (inst
->opcode
);
1905 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1907 for (i
= 0; i
< nops
; ++i
)
1908 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1909 != inst
->operands
[i
].qualifier
)
1917 /* Do miscellaneous encodings that are not common enough to be driven by
1921 do_misc_encoding (aarch64_inst
*inst
)
1925 switch (inst
->opcode
->op
)
1934 encode_asimd_fcvt (inst
);
1937 encode_asisd_fcvtxn (inst
);
1942 /* Copy Pn to Pm and Pg. */
1943 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1944 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1945 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1948 /* Copy Zd to Zm. */
1949 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1950 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1953 /* Fill in the zero immediate. */
1954 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1955 2, FLD_imm5
, FLD_SVE_tszh
);
1958 /* Copy Zn to Zm. */
1959 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1960 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1965 /* Copy Pd to Pm. */
1966 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1967 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1969 case OP_MOVZS_P_P_P
:
1971 /* Copy Pn to Pm. */
1972 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1973 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1975 case OP_NOTS_P_P_P_Z
:
1976 case OP_NOT_P_P_P_Z
:
1977 /* Copy Pg to Pm. */
1978 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1979 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1985 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1987 encode_sizeq (aarch64_inst
*inst
)
1990 enum aarch64_field_kind kind
;
1993 /* Get the index of the operand whose information we are going to use
1994 to encode the size and Q fields.
1995 This is deduced from the possible valid qualifier lists. */
1996 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1997 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1998 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1999 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
2001 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
2003 if (inst
->opcode
->iclass
== asisdlse
2004 || inst
->opcode
->iclass
== asisdlsep
2005 || inst
->opcode
->iclass
== asisdlso
2006 || inst
->opcode
->iclass
== asisdlsop
)
2007 kind
= FLD_vldst_size
;
2010 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
2013 /* Opcodes that have fields shared by multiple operands are usually flagged
2014 with flags. In this function, we detect such flags and use the
2015 information in one of the related operands to do the encoding. The 'one'
2016 operand is not any operand but one of the operands that has the enough
2017 information for such an encoding. */
2020 do_special_encoding (struct aarch64_inst
*inst
)
2023 aarch64_insn value
= 0;
2025 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
2027 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2028 if (inst
->opcode
->flags
& F_COND
)
2030 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
2032 if (inst
->opcode
->flags
& F_SF
)
2034 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
2035 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
2036 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
2038 insert_field (FLD_sf
, &inst
->value
, value
, 0);
2039 if (inst
->opcode
->flags
& F_N
)
2040 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
2042 if (inst
->opcode
->flags
& F_LSE_SZ
)
2044 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
2045 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
2046 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
2048 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
2050 if (inst
->opcode
->flags
& F_RCPC3_SIZE
)
2052 switch (inst
->operands
[0].qualifier
)
2054 case AARCH64_OPND_QLF_W
: value
= 2; break;
2055 case AARCH64_OPND_QLF_X
: value
= 3; break;
2056 case AARCH64_OPND_QLF_S_B
: value
= 0; break;
2057 case AARCH64_OPND_QLF_S_H
: value
= 1; break;
2058 case AARCH64_OPND_QLF_S_S
: value
= 2; break;
2059 case AARCH64_OPND_QLF_S_D
: value
= 3; break;
2060 case AARCH64_OPND_QLF_S_Q
: value
= 0; break;
2063 insert_field (FLD_rcpc3_size
, &inst
->value
, value
, 0);
2066 if (inst
->opcode
->flags
& F_SIZEQ
)
2067 encode_sizeq (inst
);
2068 if (inst
->opcode
->flags
& F_FPTYPE
)
2070 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
2071 switch (inst
->operands
[idx
].qualifier
)
2073 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
2074 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
2075 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
2078 insert_field (FLD_type
, &inst
->value
, value
, 0);
2080 if (inst
->opcode
->flags
& F_SSIZE
)
2082 enum aarch64_opnd_qualifier qualifier
;
2083 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
2084 qualifier
= inst
->operands
[idx
].qualifier
;
2085 assert (qualifier
>= AARCH64_OPND_QLF_S_B
2086 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
2087 value
= aarch64_get_qualifier_standard_value (qualifier
);
2088 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
2090 if (inst
->opcode
->flags
& F_T
)
2092 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
2093 aarch64_field field
= {0, 0};
2094 enum aarch64_opnd_qualifier qualifier
;
2097 qualifier
= inst
->operands
[idx
].qualifier
;
2098 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
2099 == AARCH64_OPND_CLASS_SIMD_REG
2100 && qualifier
>= AARCH64_OPND_QLF_V_8B
2101 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
2112 value
= aarch64_get_qualifier_standard_value (qualifier
);
2113 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
2114 num
= (int) value
>> 1;
2115 assert (num
>= 0 && num
<= 3);
2116 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
2117 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
2120 if ((inst
->opcode
->flags
& F_OPD_SIZE
) && inst
->opcode
->iclass
== sve2_urqvs
)
2122 enum aarch64_opnd_qualifier qualifier
[2];
2123 aarch64_insn value1
= 0;
2125 qualifier
[0] = inst
->operands
[idx
].qualifier
;
2126 qualifier
[1] = inst
->operands
[idx
+2].qualifier
;
2127 value
= aarch64_get_qualifier_standard_value (qualifier
[0]);
2128 value1
= aarch64_get_qualifier_standard_value (qualifier
[1]);
2129 assert ((value
>> 1) == value1
);
2130 insert_field (FLD_size
, &inst
->value
, value1
, inst
->opcode
->mask
);
2133 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
2135 /* Use Rt to encode in the case of e.g.
2136 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2137 enum aarch64_opnd_qualifier qualifier
;
2138 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
2140 /* Otherwise use the result operand, which has to be a integer
2143 assert (idx
== 0 || idx
== 1);
2144 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
2145 == AARCH64_OPND_CLASS_INT_REG
);
2146 qualifier
= inst
->operands
[idx
].qualifier
;
2147 insert_field (FLD_Q
, &inst
->value
,
2148 aarch64_get_qualifier_standard_value (qualifier
), 0);
2150 if (inst
->opcode
->flags
& F_LDS_SIZE
)
2152 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
2153 enum aarch64_opnd_qualifier qualifier
;
2154 aarch64_field field
= {0, 0};
2155 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
2156 == AARCH64_OPND_CLASS_INT_REG
);
2157 gen_sub_field (FLD_opc
, 0, 1, &field
);
2158 qualifier
= inst
->operands
[0].qualifier
;
2159 insert_field_2 (&field
, &inst
->value
,
2160 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
2162 /* Miscellaneous encoding as the last step. */
2163 if (inst
->opcode
->flags
& F_MISC
)
2164 do_misc_encoding (inst
);
2166 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
/* NOTE(review): this whole region is a garbled extraction of
   aarch64_encode_variant_using_iclass: the function header keywords,
   braces, `break' statements and several `case' labels were stripped in
   extraction, and numbers such as "2178" are original source line numbers
   fused into the text.  Only comments are added below; the code bytes are
   left untouched.  Recover the exact code from upstream before building.  */
2169 /* Some instructions (including all SVE ones) use the instruction class
2170 to describe how a qualifiers_list index is represented in the instruction
2171 encoding. If INST is such an instruction, encode the chosen qualifier
2175 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
2178 switch (inst
->opcode
->iclass
)
2182 /* The variant is encoded as part of the immediate. */
2185 case sme_size_12_bh
:
2186 insert_field (FLD_S
, &inst
->value
, aarch64_get_variant (inst
), 0);
2189 case sme_size_12_bhs
:
2191 insert_field (FLD_SME_size_12
, &inst
->value
,
2192 aarch64_get_variant (inst
), 0);
/* NOTE(review): a `case' label was stripped here -- an SME class encoded
   in FLD_SME_size_22; confirm against upstream.  */
2196 insert_field (FLD_SME_size_22
, &inst
->value
,
2197 aarch64_get_variant (inst
), 0);
2200 case sme_size_22_hsd
:
2201 insert_field (FLD_SME_size_22
, &inst
->value
,
2202 aarch64_get_variant (inst
) + 1, 0);
2205 case sme_size_12_hs
:
2206 insert_field (FLD_SME_size_12
, &inst
->value
,
2207 aarch64_get_variant (inst
) + 1, 0);
/* NOTE(review): stripped case label -- class encoded in FLD_SME_sz_23.  */
2211 insert_field (FLD_SME_sz_23
, &inst
->value
,
2212 aarch64_get_variant (inst
), 0);
/* NOTE(review): stripped case label.  Two-field insertion: per the
   insert_fields contract the least significant field is passed first, so
   FLD_SVE_M_14 receives the low bit of the variant and FLD_size the rest.  */
2216 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
2217 0, 2, FLD_SVE_M_14
, FLD_size
);
2223 case sve_shift_pred
:
2224 case sve_shift_unpred
:
2225 case sve_shift_tsz_hsd
:
2226 case sve_shift_tsz_bhsd
:
2227 /* For indices and shift amounts, the variant is encoded as
2228 part of the immediate. */
2233 /* For sve_limm, the .B, .H, and .S forms are just a convenience
2234 and depend on the immediate. They don't have a separate
2241 /* These instructions have only a single variant. */
/* NOTE(review): stripped case label -- low bit goes to FLD_SVE_M_16,
   remaining bits to FLD_size (least significant field first).  */
2245 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
2246 0, 2, FLD_SVE_M_16
, FLD_size
);
/* NOTE(review): stripped case label -- single-bit FLD_SVE_M_4.  */
2250 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
/* NOTE(review): stripped case label(s) -- variant stored to FLD_size
   unmodified.  */
2255 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
2259 /* MOD 3 For `OP_SVE_Vv_HSD`. */
2260 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) % 3 + 1, 0);
/* NOTE(review): stripped case label -- FLD_SVE_sz.  */
2267 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
/* NOTE(review): stripped case label -- FLD_SVE_sz2.  */
2271 insert_field (FLD_SVE_sz2
, &inst
->value
, aarch64_get_variant (inst
), 0);
/* NOTE(review): stripped case label -- FLD_SVE_size, variant offset by
   one.  */
2275 insert_field (FLD_SVE_size
, &inst
->value
,
2276 aarch64_get_variant (inst
) + 1, 0);
2279 case sve_size_tsz_bhs
:
/* One-hot value (1 << variant) split across FLD_SVE_tszl_19 (low bits
   first) and FLD_SVE_sz.  */
2280 insert_fields (&inst
->value
,
2281 (1 << aarch64_get_variant (inst
)),
2282 0, 2, FLD_SVE_tszl_19
, FLD_SVE_sz
);
/* NOTE(review): final arm -- its case label and any intervening
   adjustment of VARIANT (original lines 2287-2288) were stripped.  */
2286 variant
= aarch64_get_variant (inst
) + 1;
2289 insert_field (FLD_size
, &inst
->value
, variant
, 0);
2297 /* Converters converting an alias opcode instruction to its real form. */
2299 /* ROR <Wd>, <Ws>, #<shift>
2301 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2303 convert_ror_to_extr (aarch64_inst
*inst
)
2305 copy_operand_info (inst
, 3, 2);
2306 copy_operand_info (inst
, 2, 1);
2309 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2311 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2313 convert_xtl_to_shll (aarch64_inst
*inst
)
2315 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
2316 inst
->operands
[2].imm
.value
= 0;
2320 LSR <Xd>, <Xn>, #<shift>
2322 UBFM <Xd>, <Xn>, #<shift>, #63. */
2324 convert_sr_to_bfm (aarch64_inst
*inst
)
2326 inst
->operands
[3].imm
.value
=
2327 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
2330 /* Convert MOV to ORR. */
2332 convert_mov_to_orr (aarch64_inst
*inst
)
2334 /* MOV <Vd>.<T>, <Vn>.<T>
2336 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2337 copy_operand_info (inst
, 2, 1);
2340 /* When <imms> >= <immr>, the instruction written:
2341 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2343 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2346 convert_bfx_to_bfm (aarch64_inst
*inst
)
2350 /* Convert the operand. */
2351 lsb
= inst
->operands
[2].imm
.value
;
2352 width
= inst
->operands
[3].imm
.value
;
2353 inst
->operands
[2].imm
.value
= lsb
;
2354 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
2357 /* When <imms> < <immr>, the instruction written:
2358 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2360 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2363 convert_bfi_to_bfm (aarch64_inst
*inst
)
2367 /* Convert the operand. */
2368 lsb
= inst
->operands
[2].imm
.value
;
2369 width
= inst
->operands
[3].imm
.value
;
2370 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2372 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2373 inst
->operands
[3].imm
.value
= width
- 1;
2377 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2378 inst
->operands
[3].imm
.value
= width
- 1;
2382 /* The instruction written:
2383 BFC <Xd>, #<lsb>, #<width>
2385 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2388 convert_bfc_to_bfm (aarch64_inst
*inst
)
2393 copy_operand_info (inst
, 3, 2);
2394 copy_operand_info (inst
, 2, 1);
2395 copy_operand_info (inst
, 1, 0);
2396 inst
->operands
[1].reg
.regno
= 0x1f;
2398 /* Convert the immediate operand. */
2399 lsb
= inst
->operands
[2].imm
.value
;
2400 width
= inst
->operands
[3].imm
.value
;
2401 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2403 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2404 inst
->operands
[3].imm
.value
= width
- 1;
2408 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2409 inst
->operands
[3].imm
.value
= width
- 1;
2413 /* The instruction written:
2414 LSL <Xd>, <Xn>, #<shift>
2416 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2419 convert_lsl_to_ubfm (aarch64_inst
*inst
)
2421 int64_t shift
= inst
->operands
[2].imm
.value
;
2423 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2425 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
2426 inst
->operands
[3].imm
.value
= 31 - shift
;
2430 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
2431 inst
->operands
[3].imm
.value
= 63 - shift
;
2435 /* CINC <Wd>, <Wn>, <cond>
2437 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2440 convert_to_csel (aarch64_inst
*inst
)
2442 copy_operand_info (inst
, 3, 2);
2443 copy_operand_info (inst
, 2, 1);
2444 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2447 /* CSET <Wd>, <cond>
2449 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2452 convert_cset_to_csinc (aarch64_inst
*inst
)
2454 copy_operand_info (inst
, 3, 1);
2455 copy_operand_info (inst
, 2, 0);
2456 copy_operand_info (inst
, 1, 0);
2457 inst
->operands
[1].reg
.regno
= 0x1f;
2458 inst
->operands
[2].reg
.regno
= 0x1f;
2459 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2464 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2467 convert_mov_to_movewide (aarch64_inst
*inst
)
2470 uint32_t shift_amount
;
2471 uint64_t value
= ~(uint64_t)0;
2473 switch (inst
->opcode
->op
)
2475 case OP_MOV_IMM_WIDE
:
2476 value
= inst
->operands
[1].imm
.value
;
2478 case OP_MOV_IMM_WIDEN
:
2479 value
= ~inst
->operands
[1].imm
.value
;
2484 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
2485 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
2486 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
2487 /* The constraint check should have guaranteed this wouldn't happen. */
2489 value
>>= shift_amount
;
2491 inst
->operands
[1].imm
.value
= value
;
2492 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
2493 inst
->operands
[1].shifter
.amount
= shift_amount
;
2498 ORR <Wd>, WZR, #<imm>. */
2501 convert_mov_to_movebitmask (aarch64_inst
*inst
)
2503 copy_operand_info (inst
, 2, 1);
2504 inst
->operands
[1].reg
.regno
= 0x1f;
2505 inst
->operands
[1].skip
= 0;
2508 /* Some alias opcodes are assembled by being converted to their real-form. */
2511 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
2513 const aarch64_opcode
*alias
= inst
->opcode
;
2515 if ((alias
->flags
& F_CONV
) == 0)
2516 goto convert_to_real_return
;
2522 convert_sr_to_bfm (inst
);
2525 convert_lsl_to_ubfm (inst
);
2530 convert_to_csel (inst
);
2534 convert_cset_to_csinc (inst
);
2539 convert_bfx_to_bfm (inst
);
2544 convert_bfi_to_bfm (inst
);
2547 convert_bfc_to_bfm (inst
);
2550 convert_mov_to_orr (inst
);
2552 case OP_MOV_IMM_WIDE
:
2553 case OP_MOV_IMM_WIDEN
:
2554 convert_mov_to_movewide (inst
);
2556 case OP_MOV_IMM_LOG
:
2557 convert_mov_to_movebitmask (inst
);
2560 convert_ror_to_extr (inst
);
2566 convert_xtl_to_shll (inst
);
2572 convert_to_real_return
:
2573 aarch64_replace_opcode (inst
, real
);
2576 /* Encode *INST_ORI of the opcode code OPCODE.
2577 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2578 matched operand qualifier sequence in *QLF_SEQ. */
2581 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
2582 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
2583 aarch64_opnd_qualifier_t
*qlf_seq
,
2584 aarch64_operand_error
*mismatch_detail
,
2585 aarch64_instr_sequence
* insn_sequence
)
2588 const aarch64_opcode
*aliased
;
2589 aarch64_inst copy
, *inst
;
2591 DEBUG_TRACE ("enter with %s", opcode
->name
);
2593 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2597 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
2598 if (inst
->opcode
== NULL
)
2599 inst
->opcode
= opcode
;
2601 /* Constrain the operands.
2602 After passing this, the encoding is guaranteed to succeed. */
2603 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
2605 DEBUG_TRACE ("FAIL since operand constraint not met");
2609 /* Get the base value.
2610 Note: this has to be before the aliasing handling below in order to
2611 get the base value from the alias opcode before we move on to the
2612 aliased opcode for encoding. */
2613 inst
->value
= opcode
->opcode
;
2615 /* No need to do anything else if the opcode does not have any operand. */
2616 if (aarch64_num_of_operands (opcode
) == 0)
2619 /* Assign operand indexes and check types. Also put the matched
2620 operand qualifiers in *QLF_SEQ to return. */
2621 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2623 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
2624 inst
->operands
[i
].idx
= i
;
2625 if (qlf_seq
!= NULL
)
2626 *qlf_seq
= inst
->operands
[i
].qualifier
;
2629 aliased
= aarch64_find_real_opcode (opcode
);
2630 /* If the opcode is an alias and it does not ask for direct encoding by
2631 itself, the instruction will be transformed to the form of real opcode
2632 and the encoding will be carried out using the rules for the aliased
2634 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
2636 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2637 aliased
->name
, opcode
->name
);
2638 /* Convert the operands to the form of the real opcode. */
2639 convert_to_real (inst
, aliased
);
2643 aarch64_opnd_info
*info
= inst
->operands
;
2645 /* Call the inserter of each operand. */
2646 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
2648 const aarch64_operand
*opnd
;
2649 enum aarch64_opnd type
= opcode
->operands
[i
];
2650 if (type
== AARCH64_OPND_NIL
)
2654 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2657 opnd
= &aarch64_operands
[type
];
2658 if (operand_has_inserter (opnd
)
2659 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
2664 /* Call opcode encoders indicated by flags. */
2665 if (opcode_has_special_coder (opcode
))
2666 do_special_encoding (inst
);
2668 /* Possibly use the instruction class to encode the chosen qualifier
2670 aarch64_encode_variant_using_iclass (inst
);
2672 /* Run a verifier if the instruction has one set. */
2673 if (opcode
->verifier
)
2675 enum err_type result
= opcode
->verifier (inst
, *code
, 0, true,
2676 mismatch_detail
, insn_sequence
);
2688 /* Always run constrain verifiers, this is needed because constrains need to
2689 maintain a global state. Regardless if the instruction has the flag set
2691 enum err_type result
= verify_constraints (inst
, *code
, 0, true,
2692 mismatch_detail
, insn_sequence
);
2705 DEBUG_TRACE ("exit with %s", opcode
->name
);
2707 *code
= inst
->value
;