Automatic date update in version.in
[binutils-gdb.git] / opcodes / aarch64-asm.c
blobde4c452ff0434e0bd830c9b87b16ecf1aa5f9e82
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2022 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
27 /* Utilities. */
/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be inserted into CODE.  MASK can be zero or
   the base mask of the opcode.

   N.B. the fields are required to be in such an order that the least
   significant field for VALUE comes first, e.g. the <index> in
    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases, the fields H:L:M should be passed in
   the order of M, L, H.  */

static inline void
insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  /* First variadic argument: how many field descriptors follow.  */
  num = va_arg (va, uint32_t);
  /* No caller in this file passes more than 5 fields.  */
  assert (num <= 5);
  while (num--)
    {
      /* Deposit the current least-significant chunk of VALUE into the
	 next field, then shift that chunk out of VALUE.  */
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      insert_field (kind, code, value, mask);
      value >>= field->width;
    }
  va_end (va);
}
/* Insert a raw field value VALUE into all fields in SELF->fields.
   The least significant bit goes in the final field.  */

static void
insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
		   aarch64_insn value)
{
  unsigned int i;
  enum aarch64_field_kind kind;

  /* Walk SELF->fields backwards so the last (non-NIL) field receives the
     least significant bits of VALUE, per the contract above.  */
  for (i = ARRAY_SIZE (self->fields); i-- > 0; )
    if (self->fields[i] != FLD_NIL)
      {
	kind = self->fields[i];
	insert_field (kind, code, value, 0);
	/* Consume the bits just encoded.  */
	value >>= fields[kind].width;
      }
}
79 /* Operand inserters. */
81 /* Insert nothing. */
82 bool
83 aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
84 const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
85 aarch64_insn *code ATTRIBUTE_UNUSED,
86 const aarch64_inst *inst ATTRIBUTE_UNUSED,
87 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
89 return true;
92 /* Insert register number. */
93 bool
94 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
95 aarch64_insn *code,
96 const aarch64_inst *inst ATTRIBUTE_UNUSED,
97 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
99 insert_field (self->fields[0], code, info->reg.regno, 0);
100 return true;
/* Insert register number, index and/or other data for SIMD register element
   operand, e.g. the last source operand in
     SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].
   The index encoding varies with the instruction class, hence the
   dispatch on INST->opcode->iclass below.  */
bool
aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
		     aarch64_insn *code, const aarch64_inst *inst,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
  /* index and/or type */
  if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
    {
      /* POS is the bit offset implied by the element size: 0 for B,
	 1 for H, 2 for S, 3 for D (qualifiers are consecutive).  */
      int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = info->reglane.index << pos;
	  insert_field (FLD_imm4, code, value, 0);
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D
	     i.e. a one-hot size marker at bit POS, with the index in the
	     bits above it.  */
	  aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
	  insert_field (FLD_imm5, code, value, 0);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      unsigned reglane_index = info->reglane.index;
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      unsigned reglane_index = info->reglane.index;
      assert (reglane_index < 4);
      insert_field (FLD_SM3_imm2, code, reglane_index, 0);
    }
  else
    {
      /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
	 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
      unsigned reglane_index = info->reglane.index;

      if (inst->opcode->op == OP_FCMLA_ELEM)
	/* Complex operand takes two elements.  */
	reglane_index *= 2;

      /* The index is spread across H, L and M depending on element size;
	 insert_fields wants them least-significant first.  */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  /* H:L:M */
	  assert (reglane_index < 8);
	  insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* H:L */
	  assert (reglane_index < 4);
	  insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  assert (reglane_index < 2);
	  insert_field (FLD_H, code, reglane_index, 0);
	  break;
	default:
	  return false;
	}
    }
  return true;
}
194 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
195 bool
196 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
197 aarch64_insn *code,
198 const aarch64_inst *inst ATTRIBUTE_UNUSED,
199 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
201 /* R */
202 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
203 /* len */
204 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
205 return true;
/* Insert Rt and opcode fields for a register list operand, e.g. Vt
   in AdvSIMD load/store instructions.  */
bool
aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value = 0;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned num = get_opcode_dependent_value (inst->opcode);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* opcode: selected by elements-per-structure and, for the one-element
     forms, by the number of registers in the list.  */
  switch (num)
    {
    case 1:
      /* LD1/ST1 family: opcode depends on register count.  */
      switch (info->reglist.num_regs)
	{
	case 1: value = 0x7; break;
	case 2: value = 0xa; break;
	case 3: value = 0x6; break;
	case 4: value = 0x2; break;
	default: return false;
	}
      break;
    case 2:
      /* Two elements per structure; 4-register list uses 0x3,
	 otherwise 0x8.  */
      value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
      break;
    case 3:
      value = 0x4;
      break;
    case 4:
      value = 0x0;
      break;
    default:
      return false;
    }
  insert_field (FLD_opcode, code, value, 0);

  return true;
}
252 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
253 single structure to all lanes instructions. */
254 bool
255 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
256 const aarch64_opnd_info *info, aarch64_insn *code,
257 const aarch64_inst *inst,
258 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
260 aarch64_insn value;
261 /* The opcode dependent area stores the number of elements in
262 each structure to be loaded/stored. */
263 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
265 /* Rt */
266 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
267 /* S */
268 value = (aarch64_insn) 0;
269 if (is_ld1r && info->reglist.num_regs == 2)
270 /* OP_LD1R does not have alternating variant, but have "two consecutive"
271 instead. */
272 value = (aarch64_insn) 1;
273 insert_field (FLD_S, code, value, 0);
275 return true;
/* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
   operand e.g. Vt in AdvSIMD load/store single element instructions.  */
bool
aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   const aarch64_opnd_info *info, aarch64_insn *code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize = 0;	/* fields Q:S:size.  */
  aarch64_insn opcodeh2 = 0;	/* opcode<2:1> */

  assert (info->reglist.has_index);

  /* Rt */
  insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
  /* Encode the index, opcode<2:1> and size.  The element index occupies
     progressively fewer (higher) bits of Q:S:size as the element widens;
     the freed low bits are fixed by the element size.  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Index encoded in "Q:S:size".  */
      QSsize = info->reglist.index;
      opcodeh2 = 0x0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Index encoded in "Q:S:size<1>".  */
      QSsize = info->reglist.index << 1;
      opcodeh2 = 0x1;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Index encoded in "Q:S".  */
      QSsize = info->reglist.index << 2;
      opcodeh2 = 0x2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Index encoded in "Q"; size<0> must be 1 for D.  */
      QSsize = info->reglist.index << 3 | 0x1;
      opcodeh2 = 0x2;
      break;
    default:
      return false;
    }
  insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
  /* opcode<2:1> lives in a 2-bit sub-field of the full opcode field.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  insert_field_2 (&field, code, opcodeh2, 0);

  return true;
}
/* Insert fields immh:immb and/or Q for e.g. the shift immediate in
     SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */
bool
aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code, const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
  aarch64_insn Q, imm;

  if (inst->opcode->iclass == asimdshf)
    {
      /* Q
	 immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D
	 The low bit of the standard qualifier value is the Q bit; the
	 remaining bits select the element size below.  */
      Q = (val & 0x1) ? 1 : 0;
      insert_field (FLD_Q, code, Q, inst->opcode->mask);
      val >>= 1;
    }

  assert (info->type == AARCH64_OPND_IMM_VLSR
	  || info->type == AARCH64_OPND_IMM_VLSL);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))
       i.e. right shifts are stored biased: immh:immb = width*2 - shift.  */
    imm = (16 << (unsigned)val) - info->imm.value;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)
       i.e. left shifts are stored as immh:immb = width + shift.  */
    imm = info->imm.value + (8 << (unsigned)val);
  insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);

  return true;
}
383 /* Insert fields for e.g. the immediate operands in
384 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
385 bool
386 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
387 aarch64_insn *code,
388 const aarch64_inst *inst ATTRIBUTE_UNUSED,
389 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
391 int64_t imm;
393 imm = info->imm.value;
394 if (operand_need_shift_by_two (self))
395 imm >>= 2;
396 if (operand_need_shift_by_four (self))
397 imm >>= 4;
398 insert_all_fields (self, code, imm);
399 return true;
402 /* Insert immediate and its shift amount for e.g. the last operand in
403 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
404 bool
405 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
406 aarch64_insn *code, const aarch64_inst *inst,
407 aarch64_operand_error *errors)
409 /* imm16 */
410 aarch64_ins_imm (self, info, code, inst, errors);
411 /* hw */
412 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
413 return true;
/* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.
   NOTE(review): INST is read below (inst->operands[0].qualifier) despite
   its ATTRIBUTE_UNUSED annotation — confirm whether the annotation should
   be dropped.  */
bool
aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  const aarch64_opnd_info *info,
				  aarch64_insn *code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
				  aarch64_operand_error *errors
				  ATTRIBUTE_UNUSED)
{
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  uint64_t imm = info->imm.value;
  enum aarch64_modifier_kind kind = info->shifter.kind;
  int amount = info->shifter.amount;
  aarch64_field field = {0, 0};

  /* a:b:c:d:e:f:g:h */
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
	 encoded in "a:b:c:d:e:f:g:h".  */
      imm = aarch64_shrink_expanded_imm8 (imm);
      /* Shrinking must have succeeded (negative means failure).  */
      assert ((int)imm >= 0);
    }
  insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);

  if (kind == AARCH64_MOD_NONE)
    return true;

  /* shift amount partially in cmode */
  assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
  if (kind == AARCH64_MOD_LSL)
    {
      /* AARCH64_MOD_LSL: shift zeros.  */
      int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
      assert (esize == 4 || esize == 2 || esize == 1);
      /* For 8-bit move immediate, the optional LSL #0 does not require
	 encoding.  */
      if (esize == 1)
	return true;
      /* Shift amounts are multiples of 8; encode amount / 8.  */
      amount >>= 3;
      if (esize == 4)
	gen_sub_field (FLD_cmode, 1, 2, &field);	/* per word */
      else
	gen_sub_field (FLD_cmode, 1, 1, &field);	/* per halfword */
    }
  else
    {
      /* AARCH64_MOD_MSL: shift ones.  Amounts are multiples of 16.  */
      amount >>= 4;
      gen_sub_field (FLD_cmode, 0, 1, &field);	/* per word */
    }
  insert_field_2 (&field, code, amount, 0);

  return true;
}
476 /* Insert fields for an 8-bit floating-point immediate. */
477 bool
478 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
479 aarch64_insn *code,
480 const aarch64_inst *inst ATTRIBUTE_UNUSED,
481 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
483 insert_all_fields (self, code, info->imm.value);
484 return true;
487 /* Insert 1-bit rotation immediate (#90 or #270). */
488 bool
489 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
490 const aarch64_opnd_info *info,
491 aarch64_insn *code, const aarch64_inst *inst,
492 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
494 uint64_t rot = (info->imm.value - 90) / 180;
495 assert (rot < 2U);
496 insert_field (self->fields[0], code, rot, inst->opcode->mask);
497 return true;
500 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
501 bool
502 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
503 const aarch64_opnd_info *info,
504 aarch64_insn *code, const aarch64_inst *inst,
505 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
507 uint64_t rot = info->imm.value / 90;
508 assert (rot < 4U);
509 insert_field (self->fields[0], code, rot, inst->opcode->mask);
510 return true;
513 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
514 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
515 bool
516 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
517 aarch64_insn *code,
518 const aarch64_inst *inst ATTRIBUTE_UNUSED,
519 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
521 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
522 return true;
525 /* Insert arithmetic immediate for e.g. the last operand in
526 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
527 bool
528 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
529 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
530 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
532 /* shift */
533 aarch64_insn value = info->shifter.amount ? 1 : 0;
534 insert_field (self->fields[0], code, value, 0);
535 /* imm12 (unsigned) */
536 insert_field (self->fields[1], code, info->imm.value, 0);
537 return true;
/* Common routine shared by aarch64_ins{,_inv}_limm.  INVERT_P says whether
   the operand should be inverted before encoding.  */
static bool
aarch64_ins_limm_1 (const aarch64_operand *self,
		    const aarch64_opnd_info *info, aarch64_insn *code,
		    const aarch64_inst *inst, bool invert_p,
		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  bool res;
  aarch64_insn value;
  uint64_t imm = info->imm.value;
  /* Element size comes from the destination operand's qualifier.  */
  int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);

  if (invert_p)
    imm = ~imm;
  /* The constraint check should guarantee that this will work.  */
  res = aarch64_logical_immediate_p (imm, esize, &value);
  if (res)
    /* VALUE packs N:immr:imms; fields[2] is least significant.  */
    insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
		   self->fields[0]);
  return res;
}
563 /* Insert logical/bitmask immediate for e.g. the last operand in
564 ORR <Wd|WSP>, <Wn>, #<imm>. */
565 bool
566 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
567 aarch64_insn *code, const aarch64_inst *inst,
568 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
570 return aarch64_ins_limm_1 (self, info, code, inst,
571 inst->opcode->op == OP_BIC, errors);
574 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
575 bool
576 aarch64_ins_inv_limm (const aarch64_operand *self,
577 const aarch64_opnd_info *info, aarch64_insn *code,
578 const aarch64_inst *inst,
579 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
581 return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
/* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
		aarch64_insn *code, const aarch64_inst *inst,
		aarch64_operand_error *errors)
{
  aarch64_insn value = 0;

  assert (info->idx == 0);

  /* Rt */
  aarch64_ins_regno (self, info, code, inst, errors);
  /* Pair/literal classes use a 2-bit size field; everything else uses
     the standard opc[1]:size encoding of the qualifier.  */
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      /* size */
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_Q: value = 2; break;
	default: return false;
	}
      insert_field (FLD_ldst_size, code, value, 0);
    }
  else
    {
      /* opc[1]:size */
      value = aarch64_get_qualifier_standard_value (info->qualifier);
      insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
    }

  return true;
}
622 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
623 bool
624 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
625 const aarch64_opnd_info *info, aarch64_insn *code,
626 const aarch64_inst *inst ATTRIBUTE_UNUSED,
627 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
629 /* Rn */
630 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
631 return true;
/* Encode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S;
  enum aarch64_modifier_kind kind = info->shifter.kind;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* Rm */
  insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
  /* option: LSL shares an encoding with UXTX, so substitute it here.  */
  if (kind == AARCH64_MOD_LSL)
    kind = AARCH64_MOD_UXTX;	/* Trick to enable the table-driven.  */
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* S */
  if (info->qualifier != AARCH64_OPND_QLF_S_B)
    S = info->shifter.amount != 0;
  else
    /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
       S	<amount>
       0	[absent]
       1	#0
       Must be #0 if <extend> is explicitly LSL.  */
    S = info->shifter.operator_present && info->shifter.amount_present;
  insert_field (FLD_S, code, S, 0);

  return true;
}
/* Encode the address operand for e.g.
     stlur <Xt>, [<Xn|SP>{, <amount>}].  */
bool
aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);

  /* simm9 */
  int imm = info->addr.offset.imm;
  insert_field (self->fields[1], code, imm, 0);

  /* writeback: only the pre-indexed form is supported here.  */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[2], code, 1, 0);
    }
  return true;
}
/* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!.
   N.B. INST is only referenced from the asserts below, hence its
   ATTRIBUTE_UNUSED annotation (for NDEBUG builds).  */
bool
aarch64_ins_addr_simm (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (FLD_Rn, code, info->addr.base_regno, 0);
  /* simm (imm9 or imm7) */
  imm = info->addr.offset.imm;
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions..  */
    imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  insert_field (self->fields[0], code, imm, 0);
  /* pre/post- index */
  if (info->addr.writeback)
    {
      /* These classes never take writeback.  */
      assert (inst->opcode->iclass != ldst_unscaled
	      && inst->opcode->iclass != ldstnapair_offs
	      && inst->opcode->iclass != ldstpair_off
	      && inst->opcode->iclass != ldst_unpriv);
      assert (info->addr.preind != info->addr.postind);
      /* Only the pre-index flag is encoded; post-index leaves it clear.  */
      if (info->addr.preind)
	insert_field (self->fields[1], code, 1, 0);
    }

  return true;
}
/* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ins_addr_simm10 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int imm;

  /* Rn */
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* simm10: byte offset scaled by 8, then split into a 1-bit high part
     (fields[1]) and a 9-bit low part (fields[2]).  */
  imm = info->addr.offset.imm >> 3;
  insert_field (self->fields[1], code, imm >> 9, 0);
  insert_field (self->fields[2], code, imm, 0);
  /* writeback: only the pre-indexed form is supported here.  */
  if (info->addr.writeback)
    {
      assert (info->addr.preind == 1 && info->addr.postind == 0);
      insert_field (self->fields[3], code, 1, 0);
    }
  return true;
}
751 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
752 bool
753 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
754 const aarch64_opnd_info *info,
755 aarch64_insn *code,
756 const aarch64_inst *inst ATTRIBUTE_UNUSED,
757 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
759 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
761 /* Rn */
762 insert_field (self->fields[0], code, info->addr.base_regno, 0);
763 /* uimm12 */
764 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
765 return true;
768 /* Encode the address operand for e.g.
769 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
770 bool
771 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
772 const aarch64_opnd_info *info, aarch64_insn *code,
773 const aarch64_inst *inst ATTRIBUTE_UNUSED,
774 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
776 /* Rn */
777 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
778 /* Rm | #<amount> */
779 if (info->addr.offset.is_reg)
780 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
781 else
782 insert_field (FLD_Rm, code, 0x1f, 0);
783 return true;
786 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
787 bool
788 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
789 const aarch64_opnd_info *info, aarch64_insn *code,
790 const aarch64_inst *inst ATTRIBUTE_UNUSED,
791 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
793 /* cond */
794 insert_field (FLD_cond, code, info->cond->value, 0);
795 return true;
798 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
799 bool
800 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
801 const aarch64_opnd_info *info, aarch64_insn *code,
802 const aarch64_inst *inst,
803 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
805 /* If a system instruction check if we have any restrictions on which
806 registers it can use. */
807 if (inst->opcode->iclass == ic_system)
809 uint64_t opcode_flags
810 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
811 uint32_t sysreg_flags
812 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
814 /* Check to see if it's read-only, else check if it's write only.
815 if it's both or unspecified don't care. */
816 if (opcode_flags == F_SYS_READ
817 && sysreg_flags
818 && sysreg_flags != F_REG_READ)
820 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
821 detail->error = _("specified register cannot be read from");
822 detail->index = info->idx;
823 detail->non_fatal = true;
825 else if (opcode_flags == F_SYS_WRITE
826 && sysreg_flags
827 && sysreg_flags != F_REG_WRITE)
829 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
830 detail->error = _("specified register cannot be written to");
831 detail->index = info->idx;
832 detail->non_fatal = true;
835 /* op0:op1:CRn:CRm:op2 */
836 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
837 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
838 return true;
841 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
842 bool
843 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
844 const aarch64_opnd_info *info, aarch64_insn *code,
845 const aarch64_inst *inst ATTRIBUTE_UNUSED,
846 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
848 /* op1:op2 */
849 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
850 FLD_op2, FLD_op1);
852 /* Extra CRm mask. */
853 if (info->sysreg.flags | F_REG_IN_CRM)
854 insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
855 return true;
858 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
859 bool
860 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
861 const aarch64_opnd_info *info, aarch64_insn *code,
862 const aarch64_inst *inst ATTRIBUTE_UNUSED,
863 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
865 /* op1:CRn:CRm:op2 */
866 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
867 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
868 return true;
871 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
873 bool
874 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
875 const aarch64_opnd_info *info, aarch64_insn *code,
876 const aarch64_inst *inst ATTRIBUTE_UNUSED,
877 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
879 /* CRm */
880 insert_field (FLD_CRm, code, info->barrier->value, 0);
881 return true;
/* Encode the memory barrier option operand for DSB <option>nXS|#<imm>.  */

bool
aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
     encoded in CRm<3:2>.  The stored option value is mapped down to the
     2-bit field: (value >> 2) - 4.  */
  aarch64_insn value = (info->barrier->value >> 2) - 4;
  insert_field (FLD_CRm_dsb_nxs, code, value, 0);
  return true;
}
899 /* Encode the prefetch operation option operand for e.g.
900 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
902 bool
903 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
904 const aarch64_opnd_info *info, aarch64_insn *code,
905 const aarch64_inst *inst ATTRIBUTE_UNUSED,
906 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
908 /* prfop in Rt */
909 insert_field (FLD_Rt, code, info->prfop->value, 0);
910 return true;
913 /* Encode the hint number for instructions that alias HINT but take an
914 operand. */
916 bool
917 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
918 const aarch64_opnd_info *info, aarch64_insn *code,
919 const aarch64_inst *inst ATTRIBUTE_UNUSED,
920 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
922 /* CRm:op2. */
923 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
924 return true;
/* Encode the extended register operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  enum aarch64_modifier_kind kind;

  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* option: LSL is an alias for UXTW (32-bit) or UXTX (64-bit), so
     substitute the canonical extend before looking up its encoding.  */
  kind = info->shifter.kind;
  if (kind == AARCH64_MOD_LSL)
    kind = info->qualifier == AARCH64_OPND_QLF_W
      ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
  insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
  /* imm3 */
  insert_field (FLD_imm3, code, info->shifter.amount, 0);

  return true;
}
951 /* Encode the shifted register operand for e.g.
952 SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}. */
953 bool
954 aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
955 const aarch64_opnd_info *info, aarch64_insn *code,
956 const aarch64_inst *inst ATTRIBUTE_UNUSED,
957 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
959 /* Rm */
960 insert_field (FLD_Rm, code, info->reg.regno, 0);
961 /* shift */
962 insert_field (FLD_shift, code,
963 aarch64_get_operand_modifier_value (info->shifter.kind), 0);
964 /* imm6 */
965 insert_field (FLD_imm6, code, info->shifter.amount, 0);
967 return true;
970 /* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
971 where <simm4> is a 4-bit signed value and where <factor> is 1 plus
972 SELF's operand-dependent value. fields[0] specifies the field that
973 holds <base>. <simm4> is encoded in the SVE_imm4 field. */
974 bool
975 aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
976 const aarch64_opnd_info *info,
977 aarch64_insn *code,
978 const aarch64_inst *inst ATTRIBUTE_UNUSED,
979 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
981 int factor = 1 + get_operand_specific_data (self);
982 insert_field (self->fields[0], code, info->addr.base_regno, 0);
983 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
984 return true;
987 /* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
988 where <simm6> is a 6-bit signed value and where <factor> is 1 plus
989 SELF's operand-dependent value. fields[0] specifies the field that
990 holds <base>. <simm6> is encoded in the SVE_imm6 field. */
991 bool
992 aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
993 const aarch64_opnd_info *info,
994 aarch64_insn *code,
995 const aarch64_inst *inst ATTRIBUTE_UNUSED,
996 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
998 int factor = 1 + get_operand_specific_data (self);
999 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1000 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1001 return true;
1004 /* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1005 where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1006 SELF's operand-dependent value. fields[0] specifies the field that
1007 holds <base>. <simm9> is encoded in the concatenation of the SVE_imm6
1008 and imm3 fields, with imm3 being the less-significant part. */
1009 bool
1010 aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
1011 const aarch64_opnd_info *info,
1012 aarch64_insn *code,
1013 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1014 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1016 int factor = 1 + get_operand_specific_data (self);
1017 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1018 insert_fields (code, info->addr.offset.imm / factor, 0,
1019 2, FLD_imm3, FLD_SVE_imm6);
1020 return true;
1023 /* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1024 is a 4-bit signed number and where <shift> is SELF's operand-dependent
1025 value. fields[0] specifies the base register field. */
1026 bool
1027 aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
1028 const aarch64_opnd_info *info, aarch64_insn *code,
1029 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1030 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1032 int factor = 1 << get_operand_specific_data (self);
1033 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1034 insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
1035 return true;
1038 /* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1039 is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1040 value. fields[0] specifies the base register field. */
1041 bool
1042 aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
1043 const aarch64_opnd_info *info, aarch64_insn *code,
1044 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1045 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1047 int factor = 1 << get_operand_specific_data (self);
1048 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1049 insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
1050 return true;
1053 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1054 is SELF's operand-dependent value. fields[0] specifies the base
1055 register field and fields[1] specifies the offset register field. */
1056 bool
1057 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1058 const aarch64_opnd_info *info, aarch64_insn *code,
1059 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1060 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1062 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1063 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1064 return true;
1067 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1068 <shift> is SELF's operand-dependent value. fields[0] specifies the
1069 base register field, fields[1] specifies the offset register field and
1070 fields[2] is a single-bit field that selects SXTW over UXTW. */
1071 bool
1072 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1073 const aarch64_opnd_info *info, aarch64_insn *code,
1074 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1075 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1077 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1078 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1079 if (info->shifter.kind == AARCH64_MOD_UXTW)
1080 insert_field (self->fields[2], code, 0, 0);
1081 else
1082 insert_field (self->fields[2], code, 1, 0);
1083 return true;
1086 /* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1087 5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1088 fields[0] specifies the base register field. */
1089 bool
1090 aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
1091 const aarch64_opnd_info *info, aarch64_insn *code,
1092 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1093 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1095 int factor = 1 << get_operand_specific_data (self);
1096 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1097 insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
1098 return true;
1101 /* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1102 where <modifier> is fixed by the instruction and where <msz> is a
1103 2-bit unsigned number. fields[0] specifies the base register field
1104 and fields[1] specifies the offset register field. */
1105 static bool
1106 aarch64_ext_sve_addr_zz (const aarch64_operand *self,
1107 const aarch64_opnd_info *info, aarch64_insn *code,
1108 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1110 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1111 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1112 insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
1113 return true;
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.
   Thin wrapper: the LSL form shares its encoding with the other ZZ
   addressing modes, handled by aarch64_ext_sve_addr_zz.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.
   Thin wrapper: the SXTW form shares its encoding with the other ZZ
   addressing modes, handled by aarch64_ext_sve_addr_zz.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.
   Thin wrapper: the UXTW form shares its encoding with the other ZZ
   addressing modes, handled by aarch64_ext_sve_addr_zz.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE ADD/SUB immediate.  The encoded value is a byte together
   with a shift flag (bit 8): value 256 | <byte> means "<byte>, LSL #8".  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  if (info->shifter.amount == 8)
    /* An explicit "#<imm>, LSL #8": keep the low byte and set bit 8.  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* No explicit shift, but the value is a non-zero multiple of 256:
       encode it as a shifted byte.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    /* Plain unshifted byte immediate.  */
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
/* Encode an SVE CPY/DUP immediate.  The signed immediate uses the same
   byte-plus-shift encoding as the ADD/SUB form, so delegate to it.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
1180 /* Encode Zn[MM], where MM has a 7-bit triangular encoding. The fields
1181 array specifies which field to use for Zn. MM is encoded in the
1182 concatenation of imm5 and SVE_tszh, with imm5 being the less
1183 significant part. */
1184 bool
1185 aarch64_ins_sve_index (const aarch64_operand *self,
1186 const aarch64_opnd_info *info, aarch64_insn *code,
1187 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1188 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1190 unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
1191 insert_field (self->fields[0], code, info->reglane.regno, 0);
1192 insert_fields (code, (info->reglane.index * 2 + 1) * esize, 0,
1193 2, FLD_imm5, FLD_SVE_tszh);
1194 return true;
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.
   The bitmask encoding is identical to the normal logical-immediate
   form, so delegate to aarch64_ins_limm.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  return aarch64_ins_limm (self, info, code, inst, errors);
}
1207 /* Encode Zn[MM], where Zn occupies the least-significant part of the field
1208 and where MM occupies the most-significant part. The operand-dependent
1209 value specifies the number of bits in Zn. */
1210 bool
1211 aarch64_ins_sve_quad_index (const aarch64_operand *self,
1212 const aarch64_opnd_info *info, aarch64_insn *code,
1213 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1214 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1216 unsigned int reg_bits = get_operand_specific_data (self);
1217 assert (info->reglane.regno < (1U << reg_bits));
1218 unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
1219 insert_all_fields (self, code, val);
1220 return true;
1223 /* Encode {Zn.<T> - Zm.<T>}. The fields array specifies which field
1224 to use for Zn. */
1225 bool
1226 aarch64_ins_sve_reglist (const aarch64_operand *self,
1227 const aarch64_opnd_info *info, aarch64_insn *code,
1228 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1229 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1231 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
1232 return true;
1235 /* Encode <pattern>{, MUL #<amount>}. The fields array specifies which
1236 fields to use for <pattern>. <amount> - 1 is encoded in the SVE_imm4
1237 field. */
1238 bool
1239 aarch64_ins_sve_scale (const aarch64_operand *self,
1240 const aarch64_opnd_info *info, aarch64_insn *code,
1241 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1242 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1244 insert_all_fields (self, code, info->imm.value);
1245 insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
1246 return true;
1249 /* Encode an SVE shift left immediate. */
1250 bool
1251 aarch64_ins_sve_shlimm (const aarch64_operand *self,
1252 const aarch64_opnd_info *info, aarch64_insn *code,
1253 const aarch64_inst *inst,
1254 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1256 const aarch64_opnd_info *prev_operand;
1257 unsigned int esize;
1259 assert (info->idx > 0);
1260 prev_operand = &inst->operands[info->idx - 1];
1261 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1262 insert_all_fields (self, code, 8 * esize + info->imm.value);
1263 return true;
1266 /* Encode an SVE shift right immediate. */
1267 bool
1268 aarch64_ins_sve_shrimm (const aarch64_operand *self,
1269 const aarch64_opnd_info *info, aarch64_insn *code,
1270 const aarch64_inst *inst,
1271 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1273 const aarch64_opnd_info *prev_operand;
1274 unsigned int esize;
1276 unsigned int opnd_backshift = get_operand_specific_data (self);
1277 assert (info->idx >= (int)opnd_backshift);
1278 prev_operand = &inst->operands[info->idx - opnd_backshift];
1279 esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
1280 insert_all_fields (self, code, 16 * esize - info->imm.value);
1281 return true;
1284 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1285 The fields array specifies which field to use. */
1286 bool
1287 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1288 const aarch64_opnd_info *info,
1289 aarch64_insn *code,
1290 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1291 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1293 if (info->imm.value == 0x3f000000)
1294 insert_field (self->fields[0], code, 0, 0);
1295 else
1296 insert_field (self->fields[0], code, 1, 0);
1297 return true;
1300 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1301 The fields array specifies which field to use. */
1302 bool
1303 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1304 const aarch64_opnd_info *info,
1305 aarch64_insn *code,
1306 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1307 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1309 if (info->imm.value == 0x3f000000)
1310 insert_field (self->fields[0], code, 0, 0);
1311 else
1312 insert_field (self->fields[0], code, 1, 0);
1313 return true;
1316 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1317 The fields array specifies which field to use. */
1318 bool
1319 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1320 const aarch64_opnd_info *info,
1321 aarch64_insn *code,
1322 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1323 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1325 if (info->imm.value == 0)
1326 insert_field (self->fields[0], code, 0, 0);
1327 else
1328 insert_field (self->fields[0], code, 1, 0);
1329 return true;
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.

   The tile number and the vector-select immediate share the zan_imm
   field: wider element sizes have fewer vectors per tile (fewer
   immediate bits) and more tiles (more tile-number bits), so the tile
   number is shifted left by the remaining immediate width and OR'd in.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->za_tile_vector.v;
  /* The vector-select register is one of W12-W15, encoded as an offset
     from W12.  */
  int fld_rv = info->za_tile_vector.index.regno - 12;
  int fld_zan_imm = info->za_tile_vector.index.imm;
  int regno = info->za_tile_vector.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte: single tile ZA0, so the field is the 4-bit immediate only.  */
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* Halfword: 1 tile bit above a 3-bit immediate.  */
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* Word: 2 tile bits above a 2-bit immediate.  */
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword: 3 tile bits above a 1-bit immediate.  */
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Quadword: Q flag set; the whole field is the tile number.  */
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
1387 /* Encode in SME instruction ZERO list of up to eight 64-bit element tile names
1388 separated by commas, encoded in the "imm8" field.
1390 For programmer convenience an assembler must also accept the names of
1391 32-bit, 16-bit and 8-bit element tiles which are converted into the
1392 corresponding set of 64-bit element tiles.
1394 bool
1395 aarch64_ins_sme_za_list (const aarch64_operand *self,
1396 const aarch64_opnd_info *info,
1397 aarch64_insn *code,
1398 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1399 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1401 int fld_mask = info->imm.value;
1402 insert_field (self->fields[0], code, fld_mask, 0);
1403 return true;
1406 bool
1407 aarch64_ins_sme_za_array (const aarch64_operand *self,
1408 const aarch64_opnd_info *info,
1409 aarch64_insn *code,
1410 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1411 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1413 int regno = info->za_tile_vector.index.regno - 12;
1414 int imm = info->za_tile_vector.index.imm;
1415 insert_field (self->fields[0], code, regno, 0);
1416 insert_field (self->fields[1], code, imm, 0);
1417 return true;
1420 bool
1421 aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
1422 const aarch64_opnd_info *info,
1423 aarch64_insn *code,
1424 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1425 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1427 int regno = info->addr.base_regno;
1428 int imm = info->addr.offset.imm;
1429 insert_field (self->fields[0], code, regno, 0);
1430 insert_field (self->fields[1], code, imm, 0);
1431 return true;
1434 /* Encode in SMSTART and SMSTOP {SM | ZA } mode. */
1435 bool
1436 aarch64_ins_sme_sm_za (const aarch64_operand *self,
1437 const aarch64_opnd_info *info,
1438 aarch64_insn *code,
1439 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1440 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1442 aarch64_insn fld_crm;
1443 /* Set CRm[3:1] bits. */
1444 if (info->reg.regno == 's')
1445 fld_crm = 0x02 ; /* SVCRSM. */
1446 else if (info->reg.regno == 'z')
1447 fld_crm = 0x04; /* SVCRZA. */
1448 else
1449 return false;
1451 insert_field (self->fields[0], code, fld_crm, 0);
1452 return true;
1455 /* Encode source scalable predicate register (Pn), name of the index base
1456 register W12-W15 (Rm), and optional element index, defaulting to 0, in the
1457 range 0 to one less than the number of vector elements in a 128-bit vector
1458 register, encoded in "i1:tszh:tszl".
1460 bool
1461 aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
1462 const aarch64_opnd_info *info,
1463 aarch64_insn *code,
1464 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1465 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1467 int fld_pn = info->za_tile_vector.regno;
1468 int fld_rm = info->za_tile_vector.index.regno - 12;
1469 int imm = info->za_tile_vector.index.imm;
1470 int fld_i1, fld_tszh, fld_tshl;
1472 insert_field (self->fields[0], code, fld_rm, 0);
1473 insert_field (self->fields[1], code, fld_pn, 0);
1475 /* Optional element index, defaulting to 0, in the range 0 to one less than
1476 the number of vector elements in a 128-bit vector register, encoded in
1477 "i1:tszh:tszl".
1479 i1 tszh tszl <T>
1480 0 0 000 RESERVED
1481 x x xx1 B
1482 x x x10 H
1483 x x 100 S
1484 x 1 000 D
1486 switch (info->qualifier)
1488 case AARCH64_OPND_QLF_S_B:
1489 /* <imm> is 4 bit value. */
1490 fld_i1 = (imm >> 3) & 0x1;
1491 fld_tszh = (imm >> 2) & 0x1;
1492 fld_tshl = ((imm << 1) | 0x1) & 0x7;
1493 break;
1494 case AARCH64_OPND_QLF_S_H:
1495 /* <imm> is 3 bit value. */
1496 fld_i1 = (imm >> 2) & 0x1;
1497 fld_tszh = (imm >> 1) & 0x1;
1498 fld_tshl = ((imm << 2) | 0x2) & 0x7;
1499 break;
1500 case AARCH64_OPND_QLF_S_S:
1501 /* <imm> is 2 bit value. */
1502 fld_i1 = (imm >> 1) & 0x1;
1503 fld_tszh = imm & 0x1;
1504 fld_tshl = 0x4;
1505 break;
1506 case AARCH64_OPND_QLF_S_D:
1507 /* <imm> is 1 bit value. */
1508 fld_i1 = imm & 0x1;
1509 fld_tszh = 0x1;
1510 fld_tshl = 0x0;
1511 break;
1512 default:
1513 return false;
1516 insert_field (self->fields[2], code, fld_i1, 0);
1517 insert_field (self->fields[3], code, fld_tszh, 0);
1518 insert_field (self->fields[4], code, fld_tshl, 0);
1519 return true;
1522 /* Insert X0-X30. Register 31 is unallocated. */
1523 bool
1524 aarch64_ins_x0_to_x30 (const aarch64_operand *self,
1525 const aarch64_opnd_info *info,
1526 aarch64_insn *code,
1527 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1528 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1530 assert (info->reg.regno <= 30);
1531 insert_field (self->fields[0], code, info->reg.regno, 0);
1532 return true;
1535 /* Miscellaneous encoding functions. */
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  /* Pick the operand whose qualifier determines the wider element type:
     the source for narrowing FCVTN, the destination for widening FCVTL.  */
  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  /* The wider side is either 4S (size[0] = 0) or 2D (size[0] = 1).  */
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
1569 /* Encode size[0], i.e. bit 22, for
1570 e.g. FCVTXN <Vb><d>, <Va><n>. */
1572 static void
1573 encode_asisd_fcvtxn (aarch64_inst *inst)
1575 aarch64_insn val = 1;
1576 aarch64_field field = {0, 0};
1577 assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
1578 gen_sub_field (FLD_size, 0, 1, &field);
1579 insert_field_2 (&field, &inst->value, val, 0);
1582 /* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>. */
1583 static void
1584 encode_fcvt (aarch64_inst *inst)
1586 aarch64_insn val;
1587 const aarch64_field field = {15, 2};
1589 /* opc dstsize */
1590 switch (inst->operands[0].qualifier)
1592 case AARCH64_OPND_QLF_S_S: val = 0; break;
1593 case AARCH64_OPND_QLF_S_D: val = 1; break;
1594 case AARCH64_OPND_QLF_S_H: val = 3; break;
1595 default: abort ();
1597 insert_field_2 (&field, &inst->value, val, 0);
1599 return;
/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  Aborts if no
   entry of qualifiers_list matches the instruction's operand qualifiers.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  /* Linear scan: the first row whose every qualifier matches wins.  */
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      if (i == nops)
	return variant;
    }
  abort ();
}
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Mostly fills in fields that alias forms leave implicit, by
   copying an already-encoded register field into another field.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg (MOV is an alias of AND Pd, Pg/Z, Pn, Pn).  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm (SEL with both sources the same).  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate (element index 0 of the variant's
	 element size, in triangular encoding).  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm (ORR with both sources the same).  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm (EOR with the governing predicate as second source).  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
/* Encode the 'size' and 'Q' field for e.g. SHADD.  The qualifier's
   standard value packs size:Q, with Q in bit 0.  */
static void
encode_sizeq (aarch64_inst *inst)
{
  aarch64_insn sizeq;
  enum aarch64_field_kind kind;
  int idx;

  /* Get the index of the operand whose information we are going to use
     to encode the size and Q fields.
     This is deduced from the possible valid qualifier lists.  */
  idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
  DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
	       aarch64_get_qualifier_name (inst->operands[idx].qualifier));
  sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
  /* Q */
  insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
  /* size: the AdvSIMD load/store classes keep it in a different field.  */
  if (inst->opcode->iclass == asisdlse
     || inst->opcode->iclass == asisdlsep
     || inst->opcode->iclass == asisdlso
     || inst->opcode->iclass == asisdlsop)
    kind = FLD_vldst_size;
  else
    kind = FLD_size;
  insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
}
/* Opcodes that have fields shared by multiple operands are usually flagged
   with flags.  In this function, we detect such flags and use the
   information in one of the related operands to do the encoding.  The 'one'
   operand is not any operand but one of the operands that has the enough
   information for such an encoding.  */

static void
do_special_encoding (struct aarch64_inst *inst)
{
  int idx;
  aarch64_insn value = 0;

  DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);

  /* Condition for truly conditional executed instructions, e.g. b.cond.  */
  if (inst->opcode->flags & F_COND)
    {
      insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
    }
  if (inst->opcode->flags & F_SF)
    {
      /* sf is 1 for a 64-bit (X/SP) register operand, 0 for 32-bit.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_sf, &inst->value, value, 0);
      /* Some opcodes mirror sf into the N field.  */
      if (inst->opcode->flags & F_N)
	insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_LSE_SZ)
    {
      /* Same 32/64-bit selection, but encoded in the LSE size field.  */
      idx = select_operand_for_sf_field_coding (inst->opcode);
      value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
	       || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
	? 1 : 0;
      insert_field (FLD_lse_sz, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SIZEQ)
    encode_sizeq (inst);
  if (inst->opcode->flags & F_FPTYPE)
    {
      idx = select_operand_for_fptype_field_coding (inst->opcode);
      switch (inst->operands[idx].qualifier)
	{
	case AARCH64_OPND_QLF_S_S: value = 0; break;
	case AARCH64_OPND_QLF_S_D: value = 1; break;
	case AARCH64_OPND_QLF_S_H: value = 3; break;
	default: return;
	}
      insert_field (FLD_type, &inst->value, value, 0);
    }
  if (inst->opcode->flags & F_SSIZE)
    {
      enum aarch64_opnd_qualifier qualifier;
      idx = select_operand_for_scalar_size_field_coding (inst->opcode);
      qualifier = inst->operands[idx].qualifier;
      assert (qualifier >= AARCH64_OPND_QLF_S_B
	      && qualifier <= AARCH64_OPND_QLF_S_Q);
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_T)
    {
      int num;	/* num of consecutive '0's on the right side of imm5<3:0>.  */
      aarch64_field field = {0, 0};
      enum aarch64_opnd_qualifier qualifier;

      idx = 0;
      qualifier = inst->operands[idx].qualifier;
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_SIMD_REG
	      && qualifier >= AARCH64_OPND_QLF_V_8B
	      && qualifier <= AARCH64_OPND_QLF_V_2D);
      /* imm5<3:0>	q	<t>
	 0000		x	reserved
	 xxx1		0	8b
	 xxx1		1	16b
	 xx10		0	4h
	 xx10		1	8h
	 x100		0	2s
	 x100		1	4s
	 1000		0	reserved
	 1000		1	2d  */
      value = aarch64_get_qualifier_standard_value (qualifier);
      insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
      /* Set the single bit at position NUM within imm5<3:0> (one-hot
	 element-size encoding, per the table above).  */
      num = (int) value >> 1;
      assert (num >= 0 && num <= 3);
      gen_sub_field (FLD_imm5, 0, num + 1, &field);
      insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
    }
  if (inst->opcode->flags & F_GPRSIZE_IN_Q)
    {
      /* Use Rt to encode in the case of e.g.
	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
      enum aarch64_opnd_qualifier qualifier;
      idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
      if (idx == -1)
	/* Otherwise use the result operand, which has to be a integer
	   register.  */
	idx = 0;
      assert (idx == 0 || idx == 1);
      assert (aarch64_get_operand_class (inst->opcode->operands[idx])
	      == AARCH64_OPND_CLASS_INT_REG);
      qualifier = inst->operands[idx].qualifier;
      insert_field (FLD_Q, &inst->value,
		    aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  if (inst->opcode->flags & F_LDS_SIZE)
    {
      /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
      enum aarch64_opnd_qualifier qualifier;
      aarch64_field field = {0, 0};
      assert (aarch64_get_operand_class (inst->opcode->operands[0])
	      == AARCH64_OPND_CLASS_INT_REG);
      gen_sub_field (FLD_opc, 0, 1, &field);
      qualifier = inst->operands[0].qualifier;
      /* opc[0] is the inverse of the 32/64-bit selection.  */
      insert_field_2 (&field, &inst->value,
		      1 - aarch64_get_qualifier_standard_value (qualifier), 0);
    }
  /* Miscellaneous encoding as the last step.  */
  if (inst->opcode->flags & F_MISC)
    do_misc_encoding (inst);

  DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
}
/* Some instructions (including all SVE ones) use the instruction class
   to describe how a qualifiers_list index is represented in the instruction
   encoding.  If INST is such an instruction, encode the chosen qualifier
   variant.  */

static void
aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
{
  int variant = 0;
  switch (inst->opcode->iclass)
    {
    case sve_cpy:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_14, FLD_size);
      break;

    case sve_index:
    case sve_shift_pred:
    case sve_shift_unpred:
    case sve_shift_tsz_hsd:
    case sve_shift_tsz_bhsd:
      /* For indices and shift amounts, the variant is encoded as
	 part of the immediate.  */
      break;

    case sve_limm:
      /* For sve_limm, the .B, .H, and .S forms are just a convenience
	 and depend on the immediate.  They don't have a separate
	 encoding.  */
      break;

    case sve_misc:
      /* sve_misc instructions have only a single variant.  */
      break;

    case sve_movprfx:
      insert_fields (&inst->value, aarch64_get_variant (inst),
		     0, 2, FLD_SVE_M_16, FLD_size);
      break;

    case sve_pred_zm:
      insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_bhs:
    case sve_size_bhsd:
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd:
      /* Variant 0 is .H, which is size 1; hence the +1 bias.  */
      insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_bh:
    case sve_size_sd:
      insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_sd2:
      insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
      break;

    case sve_size_hsd2:
      /* As for sve_size_hsd: variants start at size 1 (.H).  */
      insert_field (FLD_SVE_size, &inst->value,
		    aarch64_get_variant (inst) + 1, 0);
      break;

    case sve_size_tsz_bhs:
      /* One-hot element-size encoding across tszl:sz.  */
      insert_fields (&inst->value,
		     (1 << aarch64_get_variant (inst)),
		     0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
      break;

    case sve_size_13:
      /* Size values are 1 (.H), 3 (.D): variant 1 maps to 3, not 2.  */
      variant = aarch64_get_variant (inst) + 1;
      if (variant == 2)
	  variant = 3;
      insert_field (FLD_size, &inst->value, variant, 0);
      break;

    default:
      break;
    }
}
1929 /* Converters converting an alias opcode instruction to its real form. */
/* ROR <Wd>, <Ws>, #<shift>
   is equivalent to:
   EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
static void
convert_ror_to_extr (aarch64_inst *inst)
{
  /* Shift the immediate into slot 3 first, then duplicate <Ws> into
     slot 2 — this order must not be swapped or the immediate is lost.  */
  copy_operand_info (inst, 3, 2);
  copy_operand_info (inst, 2, 1);
}
1941 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
1942 is equivalent to:
1943 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
1944 static void
1945 convert_xtl_to_shll (aarch64_inst *inst)
1947 inst->operands[2].qualifier = inst->operands[1].qualifier;
1948 inst->operands[2].imm.value = 0;
1951 /* Convert
1952 LSR <Xd>, <Xn>, #<shift>
1954 UBFM <Xd>, <Xn>, #<shift>, #63. */
1955 static void
1956 convert_sr_to_bfm (aarch64_inst *inst)
1958 inst->operands[3].imm.value =
1959 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
/* Convert MOV to ORR.  */
static void
convert_mov_to_orr (aarch64_inst *inst)
{
  /* MOV <Vd>.<T>, <Vn>.<T>
     is equivalent to:
     ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.
     Duplicate the single source into the second source slot.  */
  copy_operand_info (inst, 2, 1);
}
1972 /* When <imms> >= <immr>, the instruction written:
1973 SBFX <Xd>, <Xn>, #<lsb>, #<width>
1974 is equivalent to:
1975 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
1977 static void
1978 convert_bfx_to_bfm (aarch64_inst *inst)
1980 int64_t lsb, width;
1982 /* Convert the operand. */
1983 lsb = inst->operands[2].imm.value;
1984 width = inst->operands[3].imm.value;
1985 inst->operands[2].imm.value = lsb;
1986 inst->operands[3].imm.value = lsb + width - 1;
1989 /* When <imms> < <immr>, the instruction written:
1990 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
1991 is equivalent to:
1992 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
1994 static void
1995 convert_bfi_to_bfm (aarch64_inst *inst)
1997 int64_t lsb, width;
1999 /* Convert the operand. */
2000 lsb = inst->operands[2].imm.value;
2001 width = inst->operands[3].imm.value;
2002 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2004 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2005 inst->operands[3].imm.value = width - 1;
2007 else
2009 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2010 inst->operands[3].imm.value = width - 1;
2014 /* The instruction written:
2015 BFC <Xd>, #<lsb>, #<width>
2016 is equivalent to:
2017 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2019 static void
2020 convert_bfc_to_bfm (aarch64_inst *inst)
2022 int64_t lsb, width;
2024 /* Insert XZR. */
2025 copy_operand_info (inst, 3, 2);
2026 copy_operand_info (inst, 2, 1);
2027 copy_operand_info (inst, 1, 0);
2028 inst->operands[1].reg.regno = 0x1f;
2030 /* Convert the immediate operand. */
2031 lsb = inst->operands[2].imm.value;
2032 width = inst->operands[3].imm.value;
2033 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2035 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2036 inst->operands[3].imm.value = width - 1;
2038 else
2040 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2041 inst->operands[3].imm.value = width - 1;
2045 /* The instruction written:
2046 LSL <Xd>, <Xn>, #<shift>
2047 is equivalent to:
2048 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2050 static void
2051 convert_lsl_to_ubfm (aarch64_inst *inst)
2053 int64_t shift = inst->operands[2].imm.value;
2055 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2057 inst->operands[2].imm.value = (32 - shift) & 0x1f;
2058 inst->operands[3].imm.value = 31 - shift;
2060 else
2062 inst->operands[2].imm.value = (64 - shift) & 0x3f;
2063 inst->operands[3].imm.value = 63 - shift;
2067 /* CINC <Wd>, <Wn>, <cond>
2068 is equivalent to:
2069 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2071 static void
2072 convert_to_csel (aarch64_inst *inst)
2074 copy_operand_info (inst, 3, 2);
2075 copy_operand_info (inst, 2, 1);
2076 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2079 /* CSET <Wd>, <cond>
2080 is equivalent to:
2081 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2083 static void
2084 convert_cset_to_csinc (aarch64_inst *inst)
2086 copy_operand_info (inst, 3, 1);
2087 copy_operand_info (inst, 2, 0);
2088 copy_operand_info (inst, 1, 0);
2089 inst->operands[1].reg.regno = 0x1f;
2090 inst->operands[2].reg.regno = 0x1f;
2091 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2094 /* MOV <Wd>, #<imm>
2095 is equivalent to:
2096 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2098 static void
2099 convert_mov_to_movewide (aarch64_inst *inst)
2101 int is32;
2102 uint32_t shift_amount;
2103 uint64_t value = ~(uint64_t)0;
2105 switch (inst->opcode->op)
2107 case OP_MOV_IMM_WIDE:
2108 value = inst->operands[1].imm.value;
2109 break;
2110 case OP_MOV_IMM_WIDEN:
2111 value = ~inst->operands[1].imm.value;
2112 break;
2113 default:
2114 return;
2116 inst->operands[1].type = AARCH64_OPND_HALF;
2117 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2118 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
2119 /* The constraint check should have guaranteed this wouldn't happen. */
2120 return;
2121 value >>= shift_amount;
2122 value &= 0xffff;
2123 inst->operands[1].imm.value = value;
2124 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
2125 inst->operands[1].shifter.amount = shift_amount;
2128 /* MOV <Wd>, #<imm>
2129 is equivalent to:
2130 ORR <Wd>, WZR, #<imm>. */
2132 static void
2133 convert_mov_to_movebitmask (aarch64_inst *inst)
2135 copy_operand_info (inst, 2, 1);
2136 inst->operands[1].reg.regno = 0x1f;
2137 inst->operands[1].skip = 0;
2140 /* Some alias opcodes are assembled by being converted to their real-form. */
2142 static void
2143 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
2145 const aarch64_opcode *alias = inst->opcode;
2147 if ((alias->flags & F_CONV) == 0)
2148 goto convert_to_real_return;
2150 switch (alias->op)
2152 case OP_ASR_IMM:
2153 case OP_LSR_IMM:
2154 convert_sr_to_bfm (inst);
2155 break;
2156 case OP_LSL_IMM:
2157 convert_lsl_to_ubfm (inst);
2158 break;
2159 case OP_CINC:
2160 case OP_CINV:
2161 case OP_CNEG:
2162 convert_to_csel (inst);
2163 break;
2164 case OP_CSET:
2165 case OP_CSETM:
2166 convert_cset_to_csinc (inst);
2167 break;
2168 case OP_UBFX:
2169 case OP_BFXIL:
2170 case OP_SBFX:
2171 convert_bfx_to_bfm (inst);
2172 break;
2173 case OP_SBFIZ:
2174 case OP_BFI:
2175 case OP_UBFIZ:
2176 convert_bfi_to_bfm (inst);
2177 break;
2178 case OP_BFC:
2179 convert_bfc_to_bfm (inst);
2180 break;
2181 case OP_MOV_V:
2182 convert_mov_to_orr (inst);
2183 break;
2184 case OP_MOV_IMM_WIDE:
2185 case OP_MOV_IMM_WIDEN:
2186 convert_mov_to_movewide (inst);
2187 break;
2188 case OP_MOV_IMM_LOG:
2189 convert_mov_to_movebitmask (inst);
2190 break;
2191 case OP_ROR_IMM:
2192 convert_ror_to_extr (inst);
2193 break;
2194 case OP_SXTL:
2195 case OP_SXTL2:
2196 case OP_UXTL:
2197 case OP_UXTL2:
2198 convert_xtl_to_shll (inst);
2199 break;
2200 default:
2201 break;
2204 convert_to_real_return:
2205 aarch64_replace_opcode (inst, real);
2208 /* Encode *INST_ORI of the opcode code OPCODE.
2209 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2210 matched operand qualifier sequence in *QLF_SEQ. */
2212 bool
2213 aarch64_opcode_encode (const aarch64_opcode *opcode,
2214 const aarch64_inst *inst_ori, aarch64_insn *code,
2215 aarch64_opnd_qualifier_t *qlf_seq,
2216 aarch64_operand_error *mismatch_detail,
2217 aarch64_instr_sequence* insn_sequence)
2219 int i;
2220 const aarch64_opcode *aliased;
2221 aarch64_inst copy, *inst;
2223 DEBUG_TRACE ("enter with %s", opcode->name);
2225 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2226 copy = *inst_ori;
2227 inst = &copy;
2229 assert (inst->opcode == NULL || inst->opcode == opcode);
2230 if (inst->opcode == NULL)
2231 inst->opcode = opcode;
2233 /* Constrain the operands.
2234 After passing this, the encoding is guaranteed to succeed. */
2235 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2237 DEBUG_TRACE ("FAIL since operand constraint not met");
2238 return 0;
2241 /* Get the base value.
2242 Note: this has to be before the aliasing handling below in order to
2243 get the base value from the alias opcode before we move on to the
2244 aliased opcode for encoding. */
2245 inst->value = opcode->opcode;
2247 /* No need to do anything else if the opcode does not have any operand. */
2248 if (aarch64_num_of_operands (opcode) == 0)
2249 goto encoding_exit;
2251 /* Assign operand indexes and check types. Also put the matched
2252 operand qualifiers in *QLF_SEQ to return. */
2253 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2255 assert (opcode->operands[i] == inst->operands[i].type);
2256 inst->operands[i].idx = i;
2257 if (qlf_seq != NULL)
2258 *qlf_seq = inst->operands[i].qualifier;
2261 aliased = aarch64_find_real_opcode (opcode);
2262 /* If the opcode is an alias and it does not ask for direct encoding by
2263 itself, the instruction will be transformed to the form of real opcode
2264 and the encoding will be carried out using the rules for the aliased
2265 opcode. */
2266 if (aliased != NULL && (opcode->flags & F_CONV))
2268 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2269 aliased->name, opcode->name);
2270 /* Convert the operands to the form of the real opcode. */
2271 convert_to_real (inst, aliased);
2272 opcode = aliased;
2275 aarch64_opnd_info *info = inst->operands;
2277 /* Call the inserter of each operand. */
2278 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2280 const aarch64_operand *opnd;
2281 enum aarch64_opnd type = opcode->operands[i];
2282 if (type == AARCH64_OPND_NIL)
2283 break;
2284 if (info->skip)
2286 DEBUG_TRACE ("skip the incomplete operand %d", i);
2287 continue;
2289 opnd = &aarch64_operands[type];
2290 if (operand_has_inserter (opnd)
2291 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2292 mismatch_detail))
2293 return false;
2296 /* Call opcode encoders indicated by flags. */
2297 if (opcode_has_special_coder (opcode))
2298 do_special_encoding (inst);
2300 /* Possibly use the instruction class to encode the chosen qualifier
2301 variant. */
2302 aarch64_encode_variant_using_iclass (inst);
2304 /* Run a verifier if the instruction has one set. */
2305 if (opcode->verifier)
2307 enum err_type result = opcode->verifier (inst, *code, 0, true,
2308 mismatch_detail, insn_sequence);
2309 switch (result)
2311 case ERR_UND:
2312 case ERR_UNP:
2313 case ERR_NYI:
2314 return false;
2315 default:
2316 break;
2320 /* Always run constrain verifiers, this is needed because constrains need to
2321 maintain a global state. Regardless if the instruction has the flag set
2322 or not. */
2323 enum err_type result = verify_constraints (inst, *code, 0, true,
2324 mismatch_detail, insn_sequence);
2325 switch (result)
2327 case ERR_UND:
2328 case ERR_UNP:
2329 case ERR_NYI:
2330 return false;
2331 default:
2332 break;
2336 encoding_exit:
2337 DEBUG_TRACE ("exit with %s", opcode->name);
2339 *code = inst->value;
2341 return true;