1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
40 insert_fields (aarch64_insn
*code
, aarch64_insn value
, aarch64_insn mask
, ...)
43 const aarch64_field
*field
;
44 enum aarch64_field_kind kind
;
48 num
= va_arg (va
, uint32_t);
52 kind
= va_arg (va
, enum aarch64_field_kind
);
53 field
= &fields
[kind
];
54 insert_field (kind
, code
, value
, mask
);
55 value
>>= field
->width
;
60 /* Insert a raw field value VALUE into all fields in SELF->fields.
61 The least significant bit goes in the final field. */
64 insert_all_fields (const aarch64_operand
*self
, aarch64_insn
*code
,
68 enum aarch64_field_kind kind
;
70 for (i
= ARRAY_SIZE (self
->fields
); i
-- > 0; )
71 if (self
->fields
[i
] != FLD_NIL
)
73 kind
= self
->fields
[i
];
74 insert_field (kind
, code
, value
, 0);
75 value
>>= fields
[kind
].width
;
79 /* Operand inserters. */
83 aarch64_ins_none (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
84 const aarch64_opnd_info
*info ATTRIBUTE_UNUSED
,
85 aarch64_insn
*code ATTRIBUTE_UNUSED
,
86 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
87 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
92 /* Insert register number. */
94 aarch64_ins_regno (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
96 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
97 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
99 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
103 /* Insert register number, index and/or other data for SIMD register element
104 operand, e.g. the last source operand in
105 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
107 aarch64_ins_reglane (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
108 aarch64_insn
*code
, const aarch64_inst
*inst
,
109 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
112 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, inst
->opcode
->mask
);
113 /* index and/or type */
114 if (inst
->opcode
->iclass
== asisdone
|| inst
->opcode
->iclass
== asimdins
)
116 int pos
= info
->qualifier
- AARCH64_OPND_QLF_S_B
;
117 if (info
->type
== AARCH64_OPND_En
118 && inst
->opcode
->operands
[0] == AARCH64_OPND_Ed
)
120 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
121 assert (info
->idx
== 1); /* Vn */
122 aarch64_insn value
= info
->reglane
.index
<< pos
;
123 insert_field (FLD_imm4
, code
, value
, 0);
127 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
134 aarch64_insn value
= ((info
->reglane
.index
<< 1) | 1) << pos
;
135 insert_field (FLD_imm5
, code
, value
, 0);
138 else if (inst
->opcode
->iclass
== dotproduct
)
140 unsigned reglane_index
= info
->reglane
.index
;
141 switch (info
->qualifier
)
143 case AARCH64_OPND_QLF_S_4B
:
144 case AARCH64_OPND_QLF_S_2H
:
146 assert (reglane_index
< 4);
147 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
153 else if (inst
->opcode
->iclass
== cryptosm3
)
155 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
156 unsigned reglane_index
= info
->reglane
.index
;
157 assert (reglane_index
< 4);
158 insert_field (FLD_SM3_imm2
, code
, reglane_index
, 0);
162 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
163 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
164 unsigned reglane_index
= info
->reglane
.index
;
166 if (inst
->opcode
->op
== OP_FCMLA_ELEM
)
167 /* Complex operand takes two elements. */
170 switch (info
->qualifier
)
172 case AARCH64_OPND_QLF_S_H
:
174 assert (reglane_index
< 8);
175 insert_fields (code
, reglane_index
, 0, 3, FLD_M
, FLD_L
, FLD_H
);
177 case AARCH64_OPND_QLF_S_S
:
179 assert (reglane_index
< 4);
180 insert_fields (code
, reglane_index
, 0, 2, FLD_L
, FLD_H
);
182 case AARCH64_OPND_QLF_S_D
:
184 assert (reglane_index
< 2);
185 insert_field (FLD_H
, code
, reglane_index
, 0);
194 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
196 aarch64_ins_reglist (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
198 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
199 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
202 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
204 insert_field (FLD_len
, code
, info
->reglist
.num_regs
- 1, 0);
208 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
209 in AdvSIMD load/store instructions. */
211 aarch64_ins_ldst_reglist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
212 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
213 const aarch64_inst
*inst
,
214 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
216 aarch64_insn value
= 0;
217 /* Number of elements in each structure to be loaded/stored. */
218 unsigned num
= get_opcode_dependent_value (inst
->opcode
);
221 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
226 switch (info
->reglist
.num_regs
)
228 case 1: value
= 0x7; break;
229 case 2: value
= 0xa; break;
230 case 3: value
= 0x6; break;
231 case 4: value
= 0x2; break;
232 default: return false;
236 value
= info
->reglist
.num_regs
== 4 ? 0x3 : 0x8;
247 insert_field (FLD_opcode
, code
, value
, 0);
252 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
253 single structure to all lanes instructions. */
255 aarch64_ins_ldst_reglist_r (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
256 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
257 const aarch64_inst
*inst
,
258 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
261 /* The opcode dependent area stores the number of elements in
262 each structure to be loaded/stored. */
263 int is_ld1r
= get_opcode_dependent_value (inst
->opcode
) == 1;
266 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
268 value
= (aarch64_insn
) 0;
269 if (is_ld1r
&& info
->reglist
.num_regs
== 2)
270 /* OP_LD1R does not have alternating variant, but have "two consecutive"
272 value
= (aarch64_insn
) 1;
273 insert_field (FLD_S
, code
, value
, 0);
278 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
279 operand e.g. Vt in AdvSIMD load/store single element instructions. */
281 aarch64_ins_ldst_elemlist (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
282 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
283 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
284 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
286 aarch64_field field
= {0, 0};
287 aarch64_insn QSsize
= 0; /* fields Q:S:size. */
288 aarch64_insn opcodeh2
= 0; /* opcode<2:1> */
290 assert (info
->reglist
.has_index
);
293 insert_field (FLD_Rt
, code
, info
->reglist
.first_regno
, 0);
294 /* Encode the index, opcode<2:1> and size. */
295 switch (info
->qualifier
)
297 case AARCH64_OPND_QLF_S_B
:
298 /* Index encoded in "Q:S:size". */
299 QSsize
= info
->reglist
.index
;
302 case AARCH64_OPND_QLF_S_H
:
303 /* Index encoded in "Q:S:size<1>". */
304 QSsize
= info
->reglist
.index
<< 1;
307 case AARCH64_OPND_QLF_S_S
:
308 /* Index encoded in "Q:S". */
309 QSsize
= info
->reglist
.index
<< 2;
312 case AARCH64_OPND_QLF_S_D
:
313 /* Index encoded in "Q". */
314 QSsize
= info
->reglist
.index
<< 3 | 0x1;
320 insert_fields (code
, QSsize
, 0, 3, FLD_vldst_size
, FLD_S
, FLD_Q
);
321 gen_sub_field (FLD_asisdlso_opcode
, 1, 2, &field
);
322 insert_field_2 (&field
, code
, opcodeh2
, 0);
327 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
328 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
329 or SSHR <V><d>, <V><n>, #<shift>. */
331 aarch64_ins_advsimd_imm_shift (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
332 const aarch64_opnd_info
*info
,
333 aarch64_insn
*code
, const aarch64_inst
*inst
,
334 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
336 unsigned val
= aarch64_get_qualifier_standard_value (info
->qualifier
);
339 if (inst
->opcode
->iclass
== asimdshf
)
343 0000 x SEE AdvSIMD modified immediate
352 Q
= (val
& 0x1) ? 1 : 0;
353 insert_field (FLD_Q
, code
, Q
, inst
->opcode
->mask
);
357 assert (info
->type
== AARCH64_OPND_IMM_VLSR
358 || info
->type
== AARCH64_OPND_IMM_VLSL
);
360 if (info
->type
== AARCH64_OPND_IMM_VLSR
)
363 0000 SEE AdvSIMD modified immediate
364 0001 (16-UInt(immh:immb))
365 001x (32-UInt(immh:immb))
366 01xx (64-UInt(immh:immb))
367 1xxx (128-UInt(immh:immb)) */
368 imm
= (16 << (unsigned)val
) - info
->imm
.value
;
372 0000 SEE AdvSIMD modified immediate
373 0001 (UInt(immh:immb)-8)
374 001x (UInt(immh:immb)-16)
375 01xx (UInt(immh:immb)-32)
376 1xxx (UInt(immh:immb)-64) */
377 imm
= info
->imm
.value
+ (8 << (unsigned)val
);
378 insert_fields (code
, imm
, 0, 2, FLD_immb
, FLD_immh
);
383 /* Insert fields for e.g. the immediate operands in
384 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
386 aarch64_ins_imm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
388 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
389 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
393 imm
= info
->imm
.value
;
394 if (operand_need_shift_by_two (self
))
396 if (operand_need_shift_by_four (self
))
398 insert_all_fields (self
, code
, imm
);
402 /* Insert immediate and its shift amount for e.g. the last operand in
403 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
405 aarch64_ins_imm_half (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
406 aarch64_insn
*code
, const aarch64_inst
*inst
,
407 aarch64_operand_error
*errors
)
410 aarch64_ins_imm (self
, info
, code
, inst
, errors
);
412 insert_field (FLD_hw
, code
, info
->shifter
.amount
>> 4, 0);
416 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
417 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
419 aarch64_ins_advsimd_imm_modified (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
420 const aarch64_opnd_info
*info
,
422 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
423 aarch64_operand_error
*errors
426 enum aarch64_opnd_qualifier opnd0_qualifier
= inst
->operands
[0].qualifier
;
427 uint64_t imm
= info
->imm
.value
;
428 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
429 int amount
= info
->shifter
.amount
;
430 aarch64_field field
= {0, 0};
432 /* a:b:c:d:e:f:g:h */
433 if (!info
->imm
.is_fp
&& aarch64_get_qualifier_esize (opnd0_qualifier
) == 8)
435 /* Either MOVI <Dd>, #<imm>
436 or MOVI <Vd>.2D, #<imm>.
437 <imm> is a 64-bit immediate
438 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
439 encoded in "a:b:c:d:e:f:g:h". */
440 imm
= aarch64_shrink_expanded_imm8 (imm
);
441 assert ((int)imm
>= 0);
443 insert_fields (code
, imm
, 0, 2, FLD_defgh
, FLD_abc
);
445 if (kind
== AARCH64_MOD_NONE
)
448 /* shift amount partially in cmode */
449 assert (kind
== AARCH64_MOD_LSL
|| kind
== AARCH64_MOD_MSL
);
450 if (kind
== AARCH64_MOD_LSL
)
452 /* AARCH64_MOD_LSL: shift zeros. */
453 int esize
= aarch64_get_qualifier_esize (opnd0_qualifier
);
454 assert (esize
== 4 || esize
== 2 || esize
== 1);
455 /* For 8-bit move immediate, the optional LSL #0 does not require
461 gen_sub_field (FLD_cmode
, 1, 2, &field
); /* per word */
463 gen_sub_field (FLD_cmode
, 1, 1, &field
); /* per halfword */
467 /* AARCH64_MOD_MSL: shift ones. */
469 gen_sub_field (FLD_cmode
, 0, 1, &field
); /* per word */
471 insert_field_2 (&field
, code
, amount
, 0);
476 /* Insert fields for an 8-bit floating-point immediate. */
478 aarch64_ins_fpimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
480 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
481 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
483 insert_all_fields (self
, code
, info
->imm
.value
);
487 /* Insert 1-bit rotation immediate (#90 or #270). */
489 aarch64_ins_imm_rotate1 (const aarch64_operand
*self
,
490 const aarch64_opnd_info
*info
,
491 aarch64_insn
*code
, const aarch64_inst
*inst
,
492 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
494 uint64_t rot
= (info
->imm
.value
- 90) / 180;
496 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
500 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
502 aarch64_ins_imm_rotate2 (const aarch64_operand
*self
,
503 const aarch64_opnd_info
*info
,
504 aarch64_insn
*code
, const aarch64_inst
*inst
,
505 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
507 uint64_t rot
= info
->imm
.value
/ 90;
509 insert_field (self
->fields
[0], code
, rot
, inst
->opcode
->mask
);
513 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
514 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
516 aarch64_ins_fbits (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
518 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
519 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
521 insert_field (self
->fields
[0], code
, 64 - info
->imm
.value
, 0);
525 /* Insert arithmetic immediate for e.g. the last operand in
526 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
528 aarch64_ins_aimm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
529 aarch64_insn
*code
, const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
530 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
533 aarch64_insn value
= info
->shifter
.amount
? 1 : 0;
534 insert_field (self
->fields
[0], code
, value
, 0);
535 /* imm12 (unsigned) */
536 insert_field (self
->fields
[1], code
, info
->imm
.value
, 0);
540 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
541 the operand should be inverted before encoding. */
543 aarch64_ins_limm_1 (const aarch64_operand
*self
,
544 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
545 const aarch64_inst
*inst
, bool invert_p
,
546 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
550 uint64_t imm
= info
->imm
.value
;
551 int esize
= aarch64_get_qualifier_esize (inst
->operands
[0].qualifier
);
555 /* The constraint check should guarantee that this will work. */
556 res
= aarch64_logical_immediate_p (imm
, esize
, &value
);
558 insert_fields (code
, value
, 0, 3, self
->fields
[2], self
->fields
[1],
563 /* Insert logical/bitmask immediate for e.g. the last operand in
564 ORR <Wd|WSP>, <Wn>, #<imm>. */
566 aarch64_ins_limm (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
567 aarch64_insn
*code
, const aarch64_inst
*inst
,
568 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
570 return aarch64_ins_limm_1 (self
, info
, code
, inst
,
571 inst
->opcode
->op
== OP_BIC
, errors
);
574 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
576 aarch64_ins_inv_limm (const aarch64_operand
*self
,
577 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
578 const aarch64_inst
*inst
,
579 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
581 return aarch64_ins_limm_1 (self
, info
, code
, inst
, true, errors
);
584 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
585 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
587 aarch64_ins_ft (const aarch64_operand
*self
, const aarch64_opnd_info
*info
,
588 aarch64_insn
*code
, const aarch64_inst
*inst
,
589 aarch64_operand_error
*errors
)
591 aarch64_insn value
= 0;
593 assert (info
->idx
== 0);
596 aarch64_ins_regno (self
, info
, code
, inst
, errors
);
597 if (inst
->opcode
->iclass
== ldstpair_indexed
598 || inst
->opcode
->iclass
== ldstnapair_offs
599 || inst
->opcode
->iclass
== ldstpair_off
600 || inst
->opcode
->iclass
== loadlit
)
603 switch (info
->qualifier
)
605 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
606 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
607 case AARCH64_OPND_QLF_S_Q
: value
= 2; break;
608 default: return false;
610 insert_field (FLD_ldst_size
, code
, value
, 0);
615 value
= aarch64_get_qualifier_standard_value (info
->qualifier
);
616 insert_fields (code
, value
, 0, 2, FLD_ldst_size
, FLD_opc1
);
622 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
624 aarch64_ins_addr_simple (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
625 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
626 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
627 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
630 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
634 /* Encode the address operand for e.g.
635 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
637 aarch64_ins_addr_regoff (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
638 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
639 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
640 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
643 enum aarch64_modifier_kind kind
= info
->shifter
.kind
;
646 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
648 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
650 if (kind
== AARCH64_MOD_LSL
)
651 kind
= AARCH64_MOD_UXTX
; /* Trick to enable the table-driven. */
652 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
654 if (info
->qualifier
!= AARCH64_OPND_QLF_S_B
)
655 S
= info
->shifter
.amount
!= 0;
657 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
661 Must be #0 if <extend> is explicitly LSL. */
662 S
= info
->shifter
.operator_present
&& info
->shifter
.amount_present
;
663 insert_field (FLD_S
, code
, S
, 0);
668 /* Encode the address operand for e.g.
669 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
671 aarch64_ins_addr_offset (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
672 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
673 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
674 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
677 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
680 int imm
= info
->addr
.offset
.imm
;
681 insert_field (self
->fields
[1], code
, imm
, 0);
684 if (info
->addr
.writeback
)
686 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
687 insert_field (self
->fields
[2], code
, 1, 0);
692 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
694 aarch64_ins_addr_simm (const aarch64_operand
*self
,
695 const aarch64_opnd_info
*info
,
697 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
698 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
703 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
704 /* simm (imm9 or imm7) */
705 imm
= info
->addr
.offset
.imm
;
706 if (self
->fields
[0] == FLD_imm7
707 || info
->qualifier
== AARCH64_OPND_QLF_imm_tag
)
708 /* scaled immediate in ld/st pair instructions.. */
709 imm
>>= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
710 insert_field (self
->fields
[0], code
, imm
, 0);
711 /* pre/post- index */
712 if (info
->addr
.writeback
)
714 assert (inst
->opcode
->iclass
!= ldst_unscaled
715 && inst
->opcode
->iclass
!= ldstnapair_offs
716 && inst
->opcode
->iclass
!= ldstpair_off
717 && inst
->opcode
->iclass
!= ldst_unpriv
);
718 assert (info
->addr
.preind
!= info
->addr
.postind
);
719 if (info
->addr
.preind
)
720 insert_field (self
->fields
[1], code
, 1, 0);
726 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
728 aarch64_ins_addr_simm10 (const aarch64_operand
*self
,
729 const aarch64_opnd_info
*info
,
731 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
732 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
737 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
739 imm
= info
->addr
.offset
.imm
>> 3;
740 insert_field (self
->fields
[1], code
, imm
>> 9, 0);
741 insert_field (self
->fields
[2], code
, imm
, 0);
743 if (info
->addr
.writeback
)
745 assert (info
->addr
.preind
== 1 && info
->addr
.postind
== 0);
746 insert_field (self
->fields
[3], code
, 1, 0);
751 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
753 aarch64_ins_addr_uimm12 (const aarch64_operand
*self
,
754 const aarch64_opnd_info
*info
,
756 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
757 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
759 int shift
= get_logsz (aarch64_get_qualifier_esize (info
->qualifier
));
762 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
764 insert_field (self
->fields
[1], code
,info
->addr
.offset
.imm
>> shift
, 0);
768 /* Encode the address operand for e.g.
769 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
771 aarch64_ins_simd_addr_post (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
772 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
773 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
774 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
777 insert_field (FLD_Rn
, code
, info
->addr
.base_regno
, 0);
779 if (info
->addr
.offset
.is_reg
)
780 insert_field (FLD_Rm
, code
, info
->addr
.offset
.regno
, 0);
782 insert_field (FLD_Rm
, code
, 0x1f, 0);
786 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
788 aarch64_ins_cond (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
789 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
790 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
791 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
794 insert_field (FLD_cond
, code
, info
->cond
->value
, 0);
798 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
800 aarch64_ins_sysreg (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
801 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
802 const aarch64_inst
*inst
,
803 aarch64_operand_error
*detail ATTRIBUTE_UNUSED
)
805 /* If a system instruction check if we have any restrictions on which
806 registers it can use. */
807 if (inst
->opcode
->iclass
== ic_system
)
809 uint64_t opcode_flags
810 = inst
->opcode
->flags
& (F_SYS_READ
| F_SYS_WRITE
);
811 uint32_t sysreg_flags
812 = info
->sysreg
.flags
& (F_REG_READ
| F_REG_WRITE
);
814 /* Check to see if it's read-only, else check if it's write only.
815 if it's both or unspecified don't care. */
816 if (opcode_flags
== F_SYS_READ
818 && sysreg_flags
!= F_REG_READ
)
820 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
821 detail
->error
= _("specified register cannot be read from");
822 detail
->index
= info
->idx
;
823 detail
->non_fatal
= true;
825 else if (opcode_flags
== F_SYS_WRITE
827 && sysreg_flags
!= F_REG_WRITE
)
829 detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
830 detail
->error
= _("specified register cannot be written to");
831 detail
->index
= info
->idx
;
832 detail
->non_fatal
= true;
835 /* op0:op1:CRn:CRm:op2 */
836 insert_fields (code
, info
->sysreg
.value
, inst
->opcode
->mask
, 5,
837 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
, FLD_op0
);
841 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
843 aarch64_ins_pstatefield (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
844 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
845 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
846 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
849 insert_fields (code
, info
->pstatefield
, inst
->opcode
->mask
, 2,
852 /* Extra CRm mask. */
853 if (info
->sysreg
.flags
| F_REG_IN_CRM
)
854 insert_field (FLD_CRm
, code
, PSTATE_DECODE_CRM (info
->sysreg
.flags
), 0);
858 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
860 aarch64_ins_sysins_op (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
861 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
862 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
863 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
865 /* op1:CRn:CRm:op2 */
866 insert_fields (code
, info
->sysins_op
->value
, inst
->opcode
->mask
, 4,
867 FLD_op2
, FLD_CRm
, FLD_CRn
, FLD_op1
);
871 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
874 aarch64_ins_barrier (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
875 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
876 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
877 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
880 insert_field (FLD_CRm
, code
, info
->barrier
->value
, 0);
884 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
887 aarch64_ins_barrier_dsb_nxs (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
888 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
889 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
890 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
892 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
893 encoded in CRm<3:2>. */
894 aarch64_insn value
= (info
->barrier
->value
>> 2) - 4;
895 insert_field (FLD_CRm_dsb_nxs
, code
, value
, 0);
899 /* Encode the prefetch operation option operand for e.g.
900 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
903 aarch64_ins_prfop (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
904 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
905 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
906 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
909 insert_field (FLD_Rt
, code
, info
->prfop
->value
, 0);
913 /* Encode the hint number for instructions that alias HINT but take an
917 aarch64_ins_hint (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
918 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
919 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
920 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
923 insert_fields (code
, info
->hint_option
->value
, 0, 2, FLD_op2
, FLD_CRm
);
927 /* Encode the extended register operand for e.g.
928 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
930 aarch64_ins_reg_extended (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
931 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
932 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
933 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
935 enum aarch64_modifier_kind kind
;
938 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
940 kind
= info
->shifter
.kind
;
941 if (kind
== AARCH64_MOD_LSL
)
942 kind
= info
->qualifier
== AARCH64_OPND_QLF_W
943 ? AARCH64_MOD_UXTW
: AARCH64_MOD_UXTX
;
944 insert_field (FLD_option
, code
, aarch64_get_operand_modifier_value (kind
), 0);
946 insert_field (FLD_imm3
, code
, info
->shifter
.amount
, 0);
951 /* Encode the shifted register operand for e.g.
952 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
954 aarch64_ins_reg_shifted (const aarch64_operand
*self ATTRIBUTE_UNUSED
,
955 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
956 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
957 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
960 insert_field (FLD_Rm
, code
, info
->reg
.regno
, 0);
962 insert_field (FLD_shift
, code
,
963 aarch64_get_operand_modifier_value (info
->shifter
.kind
), 0);
965 insert_field (FLD_imm6
, code
, info
->shifter
.amount
, 0);
970 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
971 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
972 SELF's operand-dependent value. fields[0] specifies the field that
973 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
975 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand
*self
,
976 const aarch64_opnd_info
*info
,
978 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
979 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
981 int factor
= 1 + get_operand_specific_data (self
);
982 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
983 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
987 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
988 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
989 SELF's operand-dependent value. fields[0] specifies the field that
990 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
992 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand
*self
,
993 const aarch64_opnd_info
*info
,
995 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
996 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
998 int factor
= 1 + get_operand_specific_data (self
);
999 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1000 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1004 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1005 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1006 SELF's operand-dependent value. fields[0] specifies the field that
1007 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1008 and imm3 fields, with imm3 being the less-significant part. */
1010 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand
*self
,
1011 const aarch64_opnd_info
*info
,
1013 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1014 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1016 int factor
= 1 + get_operand_specific_data (self
);
1017 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1018 insert_fields (code
, info
->addr
.offset
.imm
/ factor
, 0,
1019 2, FLD_imm3
, FLD_SVE_imm6
);
1023 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1024 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1025 value. fields[0] specifies the base register field. */
1027 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand
*self
,
1028 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1029 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1030 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1032 int factor
= 1 << get_operand_specific_data (self
);
1033 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1034 insert_field (FLD_SVE_imm4
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1038 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1039 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1040 value. fields[0] specifies the base register field. */
1042 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand
*self
,
1043 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1044 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1045 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1047 int factor
= 1 << get_operand_specific_data (self
);
1048 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1049 insert_field (FLD_SVE_imm6
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1053 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1054 is SELF's operand-dependent value. fields[0] specifies the base
1055 register field and fields[1] specifies the offset register field. */
1057 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand
*self
,
1058 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1059 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1060 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1062 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1063 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1067 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1068 <shift> is SELF's operand-dependent value. fields[0] specifies the
1069 base register field, fields[1] specifies the offset register field and
1070 fields[2] is a single-bit field that selects SXTW over UXTW. */
1072 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand
*self
,
1073 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1074 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1075 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1077 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1078 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1079 if (info
->shifter
.kind
== AARCH64_MOD_UXTW
)
1080 insert_field (self
->fields
[2], code
, 0, 0);
1082 insert_field (self
->fields
[2], code
, 1, 0);
1086 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1087 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1088 fields[0] specifies the base register field. */
1090 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand
*self
,
1091 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1092 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1093 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1095 int factor
= 1 << get_operand_specific_data (self
);
1096 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1097 insert_field (FLD_imm5
, code
, info
->addr
.offset
.imm
/ factor
, 0);
1101 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1102 where <modifier> is fixed by the instruction and where <msz> is a
1103 2-bit unsigned number. fields[0] specifies the base register field
1104 and fields[1] specifies the offset register field. */
1106 aarch64_ext_sve_addr_zz (const aarch64_operand
*self
,
1107 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1108 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1110 insert_field (self
->fields
[0], code
, info
->addr
.base_regno
, 0);
1111 insert_field (self
->fields
[1], code
, info
->addr
.offset
.regno
, 0);
1112 insert_field (FLD_SVE_msz
, code
, info
->shifter
.amount
, 0);
1116 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1117 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1118 field and fields[1] specifies the offset register field. */
1120 aarch64_ins_sve_addr_zz_lsl (const aarch64_operand
*self
,
1121 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1122 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1123 aarch64_operand_error
*errors
)
1125 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1128 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1129 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1130 field and fields[1] specifies the offset register field. */
1132 aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand
*self
,
1133 const aarch64_opnd_info
*info
,
1135 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1136 aarch64_operand_error
*errors
)
1138 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1141 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1142 <msz> is a 2-bit unsigned number. fields[0] specifies the base register
1143 field and fields[1] specifies the offset register field. */
1145 aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand
*self
,
1146 const aarch64_opnd_info
*info
,
1148 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1149 aarch64_operand_error
*errors
)
1151 return aarch64_ext_sve_addr_zz (self
, info
, code
, errors
);
1154 /* Encode an SVE ADD/SUB immediate. */
1156 aarch64_ins_sve_aimm (const aarch64_operand
*self
,
1157 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1158 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1159 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1161 if (info
->shifter
.amount
== 8)
1162 insert_all_fields (self
, code
, (info
->imm
.value
& 0xff) | 256);
1163 else if (info
->imm
.value
!= 0 && (info
->imm
.value
& 0xff) == 0)
1164 insert_all_fields (self
, code
, ((info
->imm
.value
/ 256) & 0xff) | 256);
1166 insert_all_fields (self
, code
, info
->imm
.value
& 0xff);
1170 /* Encode an SVE CPY/DUP immediate. */
1172 aarch64_ins_sve_asimm (const aarch64_operand
*self
,
1173 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1174 const aarch64_inst
*inst
,
1175 aarch64_operand_error
*errors
)
1177 return aarch64_ins_sve_aimm (self
, info
, code
, inst
, errors
);
1180 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1181 array specifies which field to use for Zn. MM is encoded in the
1182 concatenation of imm5 and SVE_tszh, with imm5 being the less
1183 significant part. */
1185 aarch64_ins_sve_index (const aarch64_operand
*self
,
1186 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1187 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1188 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1190 unsigned int esize
= aarch64_get_qualifier_esize (info
->qualifier
);
1191 insert_field (self
->fields
[0], code
, info
->reglane
.regno
, 0);
1192 insert_fields (code
, (info
->reglane
.index
* 2 + 1) * esize
, 0,
1193 2, FLD_imm5
, FLD_SVE_tszh
);
1197 /* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM. */
1199 aarch64_ins_sve_limm_mov (const aarch64_operand
*self
,
1200 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1201 const aarch64_inst
*inst
,
1202 aarch64_operand_error
*errors
)
1204 return aarch64_ins_limm (self
, info
, code
, inst
, errors
);
1207 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1208 and where MM occupies the most-significant part. The operand-dependent
1209 value specifies the number of bits in Zn. */
1211 aarch64_ins_sve_quad_index (const aarch64_operand
*self
,
1212 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1213 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1214 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1216 unsigned int reg_bits
= get_operand_specific_data (self
);
1217 assert (info
->reglane
.regno
< (1U << reg_bits
));
1218 unsigned int val
= (info
->reglane
.index
<< reg_bits
) + info
->reglane
.regno
;
1219 insert_all_fields (self
, code
, val
);
1223 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1226 aarch64_ins_sve_reglist (const aarch64_operand
*self
,
1227 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1228 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1229 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1231 insert_field (self
->fields
[0], code
, info
->reglist
.first_regno
, 0);
1235 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1236 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1239 aarch64_ins_sve_scale (const aarch64_operand
*self
,
1240 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1241 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1242 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1244 insert_all_fields (self
, code
, info
->imm
.value
);
1245 insert_field (FLD_SVE_imm4
, code
, info
->shifter
.amount
- 1, 0);
1249 /* Encode an SVE shift left immediate. */
1251 aarch64_ins_sve_shlimm (const aarch64_operand
*self
,
1252 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1253 const aarch64_inst
*inst
,
1254 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1256 const aarch64_opnd_info
*prev_operand
;
1259 assert (info
->idx
> 0);
1260 prev_operand
= &inst
->operands
[info
->idx
- 1];
1261 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1262 insert_all_fields (self
, code
, 8 * esize
+ info
->imm
.value
);
1266 /* Encode an SVE shift right immediate. */
1268 aarch64_ins_sve_shrimm (const aarch64_operand
*self
,
1269 const aarch64_opnd_info
*info
, aarch64_insn
*code
,
1270 const aarch64_inst
*inst
,
1271 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1273 const aarch64_opnd_info
*prev_operand
;
1276 unsigned int opnd_backshift
= get_operand_specific_data (self
);
1277 assert (info
->idx
>= (int)opnd_backshift
);
1278 prev_operand
= &inst
->operands
[info
->idx
- opnd_backshift
];
1279 esize
= aarch64_get_qualifier_esize (prev_operand
->qualifier
);
1280 insert_all_fields (self
, code
, 16 * esize
- info
->imm
.value
);
1284 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1285 The fields array specifies which field to use. */
1287 aarch64_ins_sve_float_half_one (const aarch64_operand
*self
,
1288 const aarch64_opnd_info
*info
,
1290 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1291 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1293 if (info
->imm
.value
== 0x3f000000)
1294 insert_field (self
->fields
[0], code
, 0, 0);
1296 insert_field (self
->fields
[0], code
, 1, 0);
1300 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1301 The fields array specifies which field to use. */
1303 aarch64_ins_sve_float_half_two (const aarch64_operand
*self
,
1304 const aarch64_opnd_info
*info
,
1306 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1307 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1309 if (info
->imm
.value
== 0x3f000000)
1310 insert_field (self
->fields
[0], code
, 0, 0);
1312 insert_field (self
->fields
[0], code
, 1, 0);
1316 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1317 The fields array specifies which field to use. */
1319 aarch64_ins_sve_float_zero_one (const aarch64_operand
*self
,
1320 const aarch64_opnd_info
*info
,
1322 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1323 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1325 if (info
->imm
.value
== 0)
1326 insert_field (self
->fields
[0], code
, 0, 0);
1328 insert_field (self
->fields
[0], code
, 1, 0);
1332 /* Encode in SME instruction such as MOVA ZA tile vector register number,
1333 vector indicator, vector selector and immediate. */
1335 aarch64_ins_sme_za_hv_tiles (const aarch64_operand
*self
,
1336 const aarch64_opnd_info
*info
,
1338 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1339 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1343 int fld_v
= info
->za_tile_vector
.v
;
1344 int fld_rv
= info
->za_tile_vector
.index
.regno
- 12;
1345 int fld_zan_imm
= info
->za_tile_vector
.index
.imm
;
1346 int regno
= info
->za_tile_vector
.regno
;
1348 switch (info
->qualifier
)
1350 case AARCH64_OPND_QLF_S_B
:
1354 case AARCH64_OPND_QLF_S_H
:
1357 fld_zan_imm
|= regno
<< 3;
1359 case AARCH64_OPND_QLF_S_S
:
1362 fld_zan_imm
|= regno
<< 2;
1364 case AARCH64_OPND_QLF_S_D
:
1367 fld_zan_imm
|= regno
<< 1;
1369 case AARCH64_OPND_QLF_S_Q
:
1372 fld_zan_imm
= regno
;
1378 insert_field (self
->fields
[0], code
, fld_size
, 0);
1379 insert_field (self
->fields
[1], code
, fld_q
, 0);
1380 insert_field (self
->fields
[2], code
, fld_v
, 0);
1381 insert_field (self
->fields
[3], code
, fld_rv
, 0);
1382 insert_field (self
->fields
[4], code
, fld_zan_imm
, 0);
1387 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1388 separated by commas, encoded in the "imm8" field.
1390 For programmer convenience an assembler must also accept the names of
1391 32-bit, 16-bit and 8-bit element tiles which are converted into the
1392 corresponding set of 64-bit element tiles.
1395 aarch64_ins_sme_za_list (const aarch64_operand
*self
,
1396 const aarch64_opnd_info
*info
,
1398 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1399 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1401 int fld_mask
= info
->imm
.value
;
1402 insert_field (self
->fields
[0], code
, fld_mask
, 0);
1407 aarch64_ins_sme_za_array (const aarch64_operand
*self
,
1408 const aarch64_opnd_info
*info
,
1410 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1411 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1413 int regno
= info
->za_tile_vector
.index
.regno
- 12;
1414 int imm
= info
->za_tile_vector
.index
.imm
;
1415 insert_field (self
->fields
[0], code
, regno
, 0);
1416 insert_field (self
->fields
[1], code
, imm
, 0);
1421 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand
*self
,
1422 const aarch64_opnd_info
*info
,
1424 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1425 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1427 int regno
= info
->addr
.base_regno
;
1428 int imm
= info
->addr
.offset
.imm
;
1429 insert_field (self
->fields
[0], code
, regno
, 0);
1430 insert_field (self
->fields
[1], code
, imm
, 0);
1434 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1436 aarch64_ins_sme_sm_za (const aarch64_operand
*self
,
1437 const aarch64_opnd_info
*info
,
1439 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1440 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1442 aarch64_insn fld_crm
;
1443 /* Set CRm[3:1] bits. */
1444 if (info
->reg
.regno
== 's')
1445 fld_crm
= 0x02 ; /* SVCRSM. */
1446 else if (info
->reg
.regno
== 'z')
1447 fld_crm
= 0x04; /* SVCRZA. */
1451 insert_field (self
->fields
[0], code
, fld_crm
, 0);
1455 /* Encode source scalable predicate register (Pn), name of the index base
1456 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1457 range 0 to one less than the number of vector elements in a 128-bit vector
1458 register, encoded in "i1:tszh:tszl".
1461 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand
*self
,
1462 const aarch64_opnd_info
*info
,
1464 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1465 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1467 int fld_pn
= info
->za_tile_vector
.regno
;
1468 int fld_rm
= info
->za_tile_vector
.index
.regno
- 12;
1469 int imm
= info
->za_tile_vector
.index
.imm
;
1470 int fld_i1
, fld_tszh
, fld_tshl
;
1472 insert_field (self
->fields
[0], code
, fld_rm
, 0);
1473 insert_field (self
->fields
[1], code
, fld_pn
, 0);
1475 /* Optional element index, defaulting to 0, in the range 0 to one less than
1476 the number of vector elements in a 128-bit vector register, encoded in
1486 switch (info
->qualifier
)
1488 case AARCH64_OPND_QLF_S_B
:
1489 /* <imm> is 4 bit value. */
1490 fld_i1
= (imm
>> 3) & 0x1;
1491 fld_tszh
= (imm
>> 2) & 0x1;
1492 fld_tshl
= ((imm
<< 1) | 0x1) & 0x7;
1494 case AARCH64_OPND_QLF_S_H
:
1495 /* <imm> is 3 bit value. */
1496 fld_i1
= (imm
>> 2) & 0x1;
1497 fld_tszh
= (imm
>> 1) & 0x1;
1498 fld_tshl
= ((imm
<< 2) | 0x2) & 0x7;
1500 case AARCH64_OPND_QLF_S_S
:
1501 /* <imm> is 2 bit value. */
1502 fld_i1
= (imm
>> 1) & 0x1;
1503 fld_tszh
= imm
& 0x1;
1506 case AARCH64_OPND_QLF_S_D
:
1507 /* <imm> is 1 bit value. */
1516 insert_field (self
->fields
[2], code
, fld_i1
, 0);
1517 insert_field (self
->fields
[3], code
, fld_tszh
, 0);
1518 insert_field (self
->fields
[4], code
, fld_tshl
, 0);
1522 /* Insert X0-X30. Register 31 is unallocated. */
1524 aarch64_ins_x0_to_x30 (const aarch64_operand
*self
,
1525 const aarch64_opnd_info
*info
,
1527 const aarch64_inst
*inst ATTRIBUTE_UNUSED
,
1528 aarch64_operand_error
*errors ATTRIBUTE_UNUSED
)
1530 assert (info
->reg
.regno
<= 30);
1531 insert_field (self
->fields
[0], code
, info
->reg
.regno
, 0);
1535 /* Miscellaneous encoding functions. */
1537 /* Encode size[0], i.e. bit 22, for
1538 e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1541 encode_asimd_fcvt (aarch64_inst
*inst
)
1544 aarch64_field field
= {0, 0};
1545 enum aarch64_opnd_qualifier qualifier
= AARCH64_OPND_QLF_NIL
;
1547 switch (inst
->opcode
->op
)
1551 /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>. */
1552 qualifier
= inst
->operands
[1].qualifier
;
1556 /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>. */
1557 qualifier
= inst
->operands
[0].qualifier
;
1562 assert (qualifier
== AARCH64_OPND_QLF_V_4S
1563 || qualifier
== AARCH64_OPND_QLF_V_2D
);
1564 value
= (qualifier
== AARCH64_OPND_QLF_V_4S
) ? 0 : 1;
1565 gen_sub_field (FLD_size
, 0, 1, &field
);
1566 insert_field_2 (&field
, &inst
->value
, value
, 0);
1569 /* Encode size[0], i.e. bit 22, for
1570 e.g. FCVTXN <Vb><d>, <Va><n>. */
1573 encode_asisd_fcvtxn (aarch64_inst
*inst
)
1575 aarch64_insn val
= 1;
1576 aarch64_field field
= {0, 0};
1577 assert (inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_S
);
1578 gen_sub_field (FLD_size
, 0, 1, &field
);
1579 insert_field_2 (&field
, &inst
->value
, val
, 0);
1582 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1584 encode_fcvt (aarch64_inst
*inst
)
1587 const aarch64_field field
= {15, 2};
1590 switch (inst
->operands
[0].qualifier
)
1592 case AARCH64_OPND_QLF_S_S
: val
= 0; break;
1593 case AARCH64_OPND_QLF_S_D
: val
= 1; break;
1594 case AARCH64_OPND_QLF_S_H
: val
= 3; break;
1597 insert_field_2 (&field
, &inst
->value
, val
, 0);
1602 /* Return the index in qualifiers_list that INST is using. Should only
1603 be called once the qualifiers are known to be valid. */
1606 aarch64_get_variant (struct aarch64_inst
*inst
)
1608 int i
, nops
, variant
;
1610 nops
= aarch64_num_of_operands (inst
->opcode
);
1611 for (variant
= 0; variant
< AARCH64_MAX_QLF_SEQ_NUM
; ++variant
)
1613 for (i
= 0; i
< nops
; ++i
)
1614 if (inst
->opcode
->qualifiers_list
[variant
][i
]
1615 != inst
->operands
[i
].qualifier
)
1623 /* Do miscellaneous encodings that are not common enough to be driven by
1627 do_misc_encoding (aarch64_inst
*inst
)
1631 switch (inst
->opcode
->op
)
1640 encode_asimd_fcvt (inst
);
1643 encode_asisd_fcvtxn (inst
);
1647 /* Copy Pn to Pm and Pg. */
1648 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1649 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1650 insert_field (FLD_SVE_Pg4_10
, &inst
->value
, value
, 0);
1653 /* Copy Zd to Zm. */
1654 value
= extract_field (FLD_SVE_Zd
, inst
->value
, 0);
1655 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1658 /* Fill in the zero immediate. */
1659 insert_fields (&inst
->value
, 1 << aarch64_get_variant (inst
), 0,
1660 2, FLD_imm5
, FLD_SVE_tszh
);
1663 /* Copy Zn to Zm. */
1664 value
= extract_field (FLD_SVE_Zn
, inst
->value
, 0);
1665 insert_field (FLD_SVE_Zm_16
, &inst
->value
, value
, 0);
1670 /* Copy Pd to Pm. */
1671 value
= extract_field (FLD_SVE_Pd
, inst
->value
, 0);
1672 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1674 case OP_MOVZS_P_P_P
:
1676 /* Copy Pn to Pm. */
1677 value
= extract_field (FLD_SVE_Pn
, inst
->value
, 0);
1678 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1680 case OP_NOTS_P_P_P_Z
:
1681 case OP_NOT_P_P_P_Z
:
1682 /* Copy Pg to Pm. */
1683 value
= extract_field (FLD_SVE_Pg4_10
, inst
->value
, 0);
1684 insert_field (FLD_SVE_Pm
, &inst
->value
, value
, 0);
1690 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1692 encode_sizeq (aarch64_inst
*inst
)
1695 enum aarch64_field_kind kind
;
1698 /* Get the index of the operand whose information we are going to use
1699 to encode the size and Q fields.
1700 This is deduced from the possible valid qualifier lists. */
1701 idx
= aarch64_select_operand_for_sizeq_field_coding (inst
->opcode
);
1702 DEBUG_TRACE ("idx: %d; qualifier: %s", idx
,
1703 aarch64_get_qualifier_name (inst
->operands
[idx
].qualifier
));
1704 sizeq
= aarch64_get_qualifier_standard_value (inst
->operands
[idx
].qualifier
);
1706 insert_field (FLD_Q
, &inst
->value
, sizeq
& 0x1, inst
->opcode
->mask
);
1708 if (inst
->opcode
->iclass
== asisdlse
1709 || inst
->opcode
->iclass
== asisdlsep
1710 || inst
->opcode
->iclass
== asisdlso
1711 || inst
->opcode
->iclass
== asisdlsop
)
1712 kind
= FLD_vldst_size
;
1715 insert_field (kind
, &inst
->value
, (sizeq
>> 1) & 0x3, inst
->opcode
->mask
);
1718 /* Opcodes that have fields shared by multiple operands are usually flagged
1719 with flags. In this function, we detect such flags and use the
1720 information in one of the related operands to do the encoding. The 'one'
1721 operand is not any operand but one of the operands that has the enough
1722 information for such an encoding. */
1725 do_special_encoding (struct aarch64_inst
*inst
)
1728 aarch64_insn value
= 0;
1730 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst
->value
);
1732 /* Condition for truly conditional executed instructions, e.g. b.cond. */
1733 if (inst
->opcode
->flags
& F_COND
)
1735 insert_field (FLD_cond2
, &inst
->value
, inst
->cond
->value
, 0);
1737 if (inst
->opcode
->flags
& F_SF
)
1739 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1740 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1741 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1743 insert_field (FLD_sf
, &inst
->value
, value
, 0);
1744 if (inst
->opcode
->flags
& F_N
)
1745 insert_field (FLD_N
, &inst
->value
, value
, inst
->opcode
->mask
);
1747 if (inst
->opcode
->flags
& F_LSE_SZ
)
1749 idx
= select_operand_for_sf_field_coding (inst
->opcode
);
1750 value
= (inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_X
1751 || inst
->operands
[idx
].qualifier
== AARCH64_OPND_QLF_SP
)
1753 insert_field (FLD_lse_sz
, &inst
->value
, value
, 0);
1755 if (inst
->opcode
->flags
& F_SIZEQ
)
1756 encode_sizeq (inst
);
1757 if (inst
->opcode
->flags
& F_FPTYPE
)
1759 idx
= select_operand_for_fptype_field_coding (inst
->opcode
);
1760 switch (inst
->operands
[idx
].qualifier
)
1762 case AARCH64_OPND_QLF_S_S
: value
= 0; break;
1763 case AARCH64_OPND_QLF_S_D
: value
= 1; break;
1764 case AARCH64_OPND_QLF_S_H
: value
= 3; break;
1767 insert_field (FLD_type
, &inst
->value
, value
, 0);
1769 if (inst
->opcode
->flags
& F_SSIZE
)
1771 enum aarch64_opnd_qualifier qualifier
;
1772 idx
= select_operand_for_scalar_size_field_coding (inst
->opcode
);
1773 qualifier
= inst
->operands
[idx
].qualifier
;
1774 assert (qualifier
>= AARCH64_OPND_QLF_S_B
1775 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
1776 value
= aarch64_get_qualifier_standard_value (qualifier
);
1777 insert_field (FLD_size
, &inst
->value
, value
, inst
->opcode
->mask
);
1779 if (inst
->opcode
->flags
& F_T
)
1781 int num
; /* num of consecutive '0's on the right side of imm5<3:0>. */
1782 aarch64_field field
= {0, 0};
1783 enum aarch64_opnd_qualifier qualifier
;
1786 qualifier
= inst
->operands
[idx
].qualifier
;
1787 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1788 == AARCH64_OPND_CLASS_SIMD_REG
1789 && qualifier
>= AARCH64_OPND_QLF_V_8B
1790 && qualifier
<= AARCH64_OPND_QLF_V_2D
);
1801 value
= aarch64_get_qualifier_standard_value (qualifier
);
1802 insert_field (FLD_Q
, &inst
->value
, value
& 0x1, inst
->opcode
->mask
);
1803 num
= (int) value
>> 1;
1804 assert (num
>= 0 && num
<= 3);
1805 gen_sub_field (FLD_imm5
, 0, num
+ 1, &field
);
1806 insert_field_2 (&field
, &inst
->value
, 1 << num
, inst
->opcode
->mask
);
1808 if (inst
->opcode
->flags
& F_GPRSIZE_IN_Q
)
1810 /* Use Rt to encode in the case of e.g.
1811 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
1812 enum aarch64_opnd_qualifier qualifier
;
1813 idx
= aarch64_operand_index (inst
->opcode
->operands
, AARCH64_OPND_Rt
);
1815 /* Otherwise use the result operand, which has to be a integer
1818 assert (idx
== 0 || idx
== 1);
1819 assert (aarch64_get_operand_class (inst
->opcode
->operands
[idx
])
1820 == AARCH64_OPND_CLASS_INT_REG
);
1821 qualifier
= inst
->operands
[idx
].qualifier
;
1822 insert_field (FLD_Q
, &inst
->value
,
1823 aarch64_get_qualifier_standard_value (qualifier
), 0);
1825 if (inst
->opcode
->flags
& F_LDS_SIZE
)
1827 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1828 enum aarch64_opnd_qualifier qualifier
;
1829 aarch64_field field
= {0, 0};
1830 assert (aarch64_get_operand_class (inst
->opcode
->operands
[0])
1831 == AARCH64_OPND_CLASS_INT_REG
);
1832 gen_sub_field (FLD_opc
, 0, 1, &field
);
1833 qualifier
= inst
->operands
[0].qualifier
;
1834 insert_field_2 (&field
, &inst
->value
,
1835 1 - aarch64_get_qualifier_standard_value (qualifier
), 0);
1837 /* Miscellaneous encoding as the last step. */
1838 if (inst
->opcode
->flags
& F_MISC
)
1839 do_misc_encoding (inst
);
1841 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst
->value
);
1844 /* Some instructions (including all SVE ones) use the instruction class
1845 to describe how a qualifiers_list index is represented in the instruction
1846 encoding. If INST is such an instruction, encode the chosen qualifier
1850 aarch64_encode_variant_using_iclass (struct aarch64_inst
*inst
)
1853 switch (inst
->opcode
->iclass
)
1856 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1857 0, 2, FLD_SVE_M_14
, FLD_size
);
1861 case sve_shift_pred
:
1862 case sve_shift_unpred
:
1863 case sve_shift_tsz_hsd
:
1864 case sve_shift_tsz_bhsd
:
1865 /* For indices and shift amounts, the variant is encoded as
1866 part of the immediate. */
1870 /* For sve_limm, the .B, .H, and .S forms are just a convenience
1871 and depend on the immediate. They don't have a separate
1876 /* sve_misc instructions have only a single variant. */
1880 insert_fields (&inst
->value
, aarch64_get_variant (inst
),
1881 0, 2, FLD_SVE_M_16
, FLD_size
);
1885 insert_field (FLD_SVE_M_4
, &inst
->value
, aarch64_get_variant (inst
), 0);
1890 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
), 0);
1894 insert_field (FLD_size
, &inst
->value
, aarch64_get_variant (inst
) + 1, 0);
1899 insert_field (FLD_SVE_sz
, &inst
->value
, aarch64_get_variant (inst
), 0);
1903 insert_field (FLD_SVE_sz2
, &inst
->value
, aarch64_get_variant (inst
), 0);
1907 insert_field (FLD_SVE_size
, &inst
->value
,
1908 aarch64_get_variant (inst
) + 1, 0);
1911 case sve_size_tsz_bhs
:
1912 insert_fields (&inst
->value
,
1913 (1 << aarch64_get_variant (inst
)),
1914 0, 2, FLD_SVE_tszl_19
, FLD_SVE_sz
);
1918 variant
= aarch64_get_variant (inst
) + 1;
1921 insert_field (FLD_size
, &inst
->value
, variant
, 0);
1929 /* Converters converting an alias opcode instruction to its real form. */
1931 /* ROR <Wd>, <Ws>, #<shift>
1933 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
1935 convert_ror_to_extr (aarch64_inst
*inst
)
1937 copy_operand_info (inst
, 3, 2);
1938 copy_operand_info (inst
, 2, 1);
1941 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1943 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1945 convert_xtl_to_shll (aarch64_inst
*inst
)
1947 inst
->operands
[2].qualifier
= inst
->operands
[1].qualifier
;
1948 inst
->operands
[2].imm
.value
= 0;
1952 LSR <Xd>, <Xn>, #<shift>
1954 UBFM <Xd>, <Xn>, #<shift>, #63. */
1956 convert_sr_to_bfm (aarch64_inst
*inst
)
1958 inst
->operands
[3].imm
.value
=
1959 inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
? 31 : 63;
1962 /* Convert MOV to ORR. */
1964 convert_mov_to_orr (aarch64_inst
*inst
)
1966 /* MOV <Vd>.<T>, <Vn>.<T>
1968 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
1969 copy_operand_info (inst
, 2, 1);
1972 /* When <imms> >= <immr>, the instruction written:
1973 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1975 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1978 convert_bfx_to_bfm (aarch64_inst
*inst
)
1982 /* Convert the operand. */
1983 lsb
= inst
->operands
[2].imm
.value
;
1984 width
= inst
->operands
[3].imm
.value
;
1985 inst
->operands
[2].imm
.value
= lsb
;
1986 inst
->operands
[3].imm
.value
= lsb
+ width
- 1;
1989 /* When <imms> < <immr>, the instruction written:
1990 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1992 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1995 convert_bfi_to_bfm (aarch64_inst
*inst
)
1999 /* Convert the operand. */
2000 lsb
= inst
->operands
[2].imm
.value
;
2001 width
= inst
->operands
[3].imm
.value
;
2002 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2004 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2005 inst
->operands
[3].imm
.value
= width
- 1;
2009 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2010 inst
->operands
[3].imm
.value
= width
- 1;
2014 /* The instruction written:
2015 BFC <Xd>, #<lsb>, #<width>
2017 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2020 convert_bfc_to_bfm (aarch64_inst
*inst
)
2025 copy_operand_info (inst
, 3, 2);
2026 copy_operand_info (inst
, 2, 1);
2027 copy_operand_info (inst
, 1, 0);
2028 inst
->operands
[1].reg
.regno
= 0x1f;
2030 /* Convert the immediate operand. */
2031 lsb
= inst
->operands
[2].imm
.value
;
2032 width
= inst
->operands
[3].imm
.value
;
2033 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2035 inst
->operands
[2].imm
.value
= (32 - lsb
) & 0x1f;
2036 inst
->operands
[3].imm
.value
= width
- 1;
2040 inst
->operands
[2].imm
.value
= (64 - lsb
) & 0x3f;
2041 inst
->operands
[3].imm
.value
= width
- 1;
2045 /* The instruction written:
2046 LSL <Xd>, <Xn>, #<shift>
2048 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2051 convert_lsl_to_ubfm (aarch64_inst
*inst
)
2053 int64_t shift
= inst
->operands
[2].imm
.value
;
2055 if (inst
->operands
[2].qualifier
== AARCH64_OPND_QLF_imm_0_31
)
2057 inst
->operands
[2].imm
.value
= (32 - shift
) & 0x1f;
2058 inst
->operands
[3].imm
.value
= 31 - shift
;
2062 inst
->operands
[2].imm
.value
= (64 - shift
) & 0x3f;
2063 inst
->operands
[3].imm
.value
= 63 - shift
;
2067 /* CINC <Wd>, <Wn>, <cond>
2069 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2072 convert_to_csel (aarch64_inst
*inst
)
2074 copy_operand_info (inst
, 3, 2);
2075 copy_operand_info (inst
, 2, 1);
2076 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2079 /* CSET <Wd>, <cond>
2081 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2084 convert_cset_to_csinc (aarch64_inst
*inst
)
2086 copy_operand_info (inst
, 3, 1);
2087 copy_operand_info (inst
, 2, 0);
2088 copy_operand_info (inst
, 1, 0);
2089 inst
->operands
[1].reg
.regno
= 0x1f;
2090 inst
->operands
[2].reg
.regno
= 0x1f;
2091 inst
->operands
[3].cond
= get_inverted_cond (inst
->operands
[3].cond
);
2096 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2099 convert_mov_to_movewide (aarch64_inst
*inst
)
2102 uint32_t shift_amount
;
2103 uint64_t value
= ~(uint64_t)0;
2105 switch (inst
->opcode
->op
)
2107 case OP_MOV_IMM_WIDE
:
2108 value
= inst
->operands
[1].imm
.value
;
2110 case OP_MOV_IMM_WIDEN
:
2111 value
= ~inst
->operands
[1].imm
.value
;
2116 inst
->operands
[1].type
= AARCH64_OPND_HALF
;
2117 is32
= inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_W
;
2118 if (! aarch64_wide_constant_p (value
, is32
, &shift_amount
))
2119 /* The constraint check should have guaranteed this wouldn't happen. */
2121 value
>>= shift_amount
;
2123 inst
->operands
[1].imm
.value
= value
;
2124 inst
->operands
[1].shifter
.kind
= AARCH64_MOD_LSL
;
2125 inst
->operands
[1].shifter
.amount
= shift_amount
;
2130 ORR <Wd>, WZR, #<imm>. */
2133 convert_mov_to_movebitmask (aarch64_inst
*inst
)
2135 copy_operand_info (inst
, 2, 1);
2136 inst
->operands
[1].reg
.regno
= 0x1f;
2137 inst
->operands
[1].skip
= 0;
2140 /* Some alias opcodes are assembled by being converted to their real-form. */
2143 convert_to_real (aarch64_inst
*inst
, const aarch64_opcode
*real
)
2145 const aarch64_opcode
*alias
= inst
->opcode
;
2147 if ((alias
->flags
& F_CONV
) == 0)
2148 goto convert_to_real_return
;
2154 convert_sr_to_bfm (inst
);
2157 convert_lsl_to_ubfm (inst
);
2162 convert_to_csel (inst
);
2166 convert_cset_to_csinc (inst
);
2171 convert_bfx_to_bfm (inst
);
2176 convert_bfi_to_bfm (inst
);
2179 convert_bfc_to_bfm (inst
);
2182 convert_mov_to_orr (inst
);
2184 case OP_MOV_IMM_WIDE
:
2185 case OP_MOV_IMM_WIDEN
:
2186 convert_mov_to_movewide (inst
);
2188 case OP_MOV_IMM_LOG
:
2189 convert_mov_to_movebitmask (inst
);
2192 convert_ror_to_extr (inst
);
2198 convert_xtl_to_shll (inst
);
2204 convert_to_real_return
:
2205 aarch64_replace_opcode (inst
, real
);
2208 /* Encode *INST_ORI of the opcode code OPCODE.
2209 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2210 matched operand qualifier sequence in *QLF_SEQ. */
2213 aarch64_opcode_encode (const aarch64_opcode
*opcode
,
2214 const aarch64_inst
*inst_ori
, aarch64_insn
*code
,
2215 aarch64_opnd_qualifier_t
*qlf_seq
,
2216 aarch64_operand_error
*mismatch_detail
,
2217 aarch64_instr_sequence
* insn_sequence
)
2220 const aarch64_opcode
*aliased
;
2221 aarch64_inst copy
, *inst
;
2223 DEBUG_TRACE ("enter with %s", opcode
->name
);
2225 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2229 assert (inst
->opcode
== NULL
|| inst
->opcode
== opcode
);
2230 if (inst
->opcode
== NULL
)
2231 inst
->opcode
= opcode
;
2233 /* Constrain the operands.
2234 After passing this, the encoding is guaranteed to succeed. */
2235 if (aarch64_match_operands_constraint (inst
, mismatch_detail
) == 0)
2237 DEBUG_TRACE ("FAIL since operand constraint not met");
2241 /* Get the base value.
2242 Note: this has to be before the aliasing handling below in order to
2243 get the base value from the alias opcode before we move on to the
2244 aliased opcode for encoding. */
2245 inst
->value
= opcode
->opcode
;
2247 /* No need to do anything else if the opcode does not have any operand. */
2248 if (aarch64_num_of_operands (opcode
) == 0)
2251 /* Assign operand indexes and check types. Also put the matched
2252 operand qualifiers in *QLF_SEQ to return. */
2253 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
2255 assert (opcode
->operands
[i
] == inst
->operands
[i
].type
);
2256 inst
->operands
[i
].idx
= i
;
2257 if (qlf_seq
!= NULL
)
2258 *qlf_seq
= inst
->operands
[i
].qualifier
;
2261 aliased
= aarch64_find_real_opcode (opcode
);
2262 /* If the opcode is an alias and it does not ask for direct encoding by
2263 itself, the instruction will be transformed to the form of real opcode
2264 and the encoding will be carried out using the rules for the aliased
2266 if (aliased
!= NULL
&& (opcode
->flags
& F_CONV
))
2268 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2269 aliased
->name
, opcode
->name
);
2270 /* Convert the operands to the form of the real opcode. */
2271 convert_to_real (inst
, aliased
);
2275 aarch64_opnd_info
*info
= inst
->operands
;
2277 /* Call the inserter of each operand. */
2278 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++info
)
2280 const aarch64_operand
*opnd
;
2281 enum aarch64_opnd type
= opcode
->operands
[i
];
2282 if (type
== AARCH64_OPND_NIL
)
2286 DEBUG_TRACE ("skip the incomplete operand %d", i
);
2289 opnd
= &aarch64_operands
[type
];
2290 if (operand_has_inserter (opnd
)
2291 && !aarch64_insert_operand (opnd
, info
, &inst
->value
, inst
,
2296 /* Call opcode encoders indicated by flags. */
2297 if (opcode_has_special_coder (opcode
))
2298 do_special_encoding (inst
);
2300 /* Possibly use the instruction class to encode the chosen qualifier
2302 aarch64_encode_variant_using_iclass (inst
);
2304 /* Run a verifier if the instruction has one set. */
2305 if (opcode
->verifier
)
2307 enum err_type result
= opcode
->verifier (inst
, *code
, 0, true,
2308 mismatch_detail
, insn_sequence
);
/* Always run constraint verifiers; this is needed because constraints need to
   maintain a global state. Regardless of whether the instruction has the flag set
2323 enum err_type result
= verify_constraints (inst
, *code
, 0, true,
2324 mismatch_detail
, insn_sequence
);
2337 DEBUG_TRACE ("exit with %s", opcode
->name
);
2339 *code
= inst
->value
;