testsuite: skip confirmation in 'gdb_reinitialize_dir'
[binutils-gdb.git] / opcodes / aarch64-asm.c
blob cd79ec19cdc2a26281541efe795460eb00ef3887
1 /* aarch64-asm.c -- AArch64 assembler support.
2 Copyright (C) 2012-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 #include "sysdep.h"
22 #include <stdarg.h>
23 #include "libiberty.h"
24 #include "aarch64-asm.h"
25 #include "opintl.h"
27 /* Utilities. */
29 /* The unnamed arguments consist of the number of fields and information about
30 these fields where the VALUE will be inserted into CODE. MASK can be zero or
31 the base mask of the opcode.
33 N.B. the fields are required to be in such an order than the least signficant
34 field for VALUE comes the first, e.g. the <index> in
35 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
36 is encoded in H:L:M in some cases, the fields H:L:M should be passed in
37 the order of M, L, H. */
39 static inline void
40 insert_fields (aarch64_insn *code, aarch64_insn value, aarch64_insn mask, ...)
42 uint32_t num;
43 const aarch64_field *field;
44 enum aarch64_field_kind kind;
45 va_list va;
47 va_start (va, mask);
48 num = va_arg (va, uint32_t);
49 assert (num <= 5);
50 while (num--)
52 kind = va_arg (va, enum aarch64_field_kind);
53 field = &fields[kind];
54 insert_field (kind, code, value, mask);
55 value >>= field->width;
57 va_end (va);
60 /* Insert a raw field value VALUE into all fields in SELF->fields after START.
61 The least significant bit goes in the final field. */
63 static void
64 insert_all_fields_after (const aarch64_operand *self, unsigned int start,
65 aarch64_insn *code, aarch64_insn value)
67 unsigned int i;
68 enum aarch64_field_kind kind;
70 for (i = ARRAY_SIZE (self->fields); i-- > start; )
71 if (self->fields[i] != FLD_NIL)
73 kind = self->fields[i];
74 insert_field (kind, code, value, 0);
75 value >>= fields[kind].width;
79 /* Insert a raw field value VALUE into all fields in SELF->fields.
80 The least significant bit goes in the final field. */
82 static void
83 insert_all_fields (const aarch64_operand *self, aarch64_insn *code,
84 aarch64_insn value)
86 return insert_all_fields_after (self, 0, code, value);
89 /* Operand inserters. */
91 /* Insert nothing. */
92 bool
93 aarch64_ins_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
94 const aarch64_opnd_info *info ATTRIBUTE_UNUSED,
95 aarch64_insn *code ATTRIBUTE_UNUSED,
96 const aarch64_inst *inst ATTRIBUTE_UNUSED,
97 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
99 return true;
102 /* Insert register number. */
103 bool
104 aarch64_ins_regno (const aarch64_operand *self, const aarch64_opnd_info *info,
105 aarch64_insn *code,
106 const aarch64_inst *inst ATTRIBUTE_UNUSED,
107 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
109 int val = info->reg.regno - get_operand_specific_data (self);
110 insert_field (self->fields[0], code, val, 0);
111 return true;
114 /* Insert register number, index and/or other data for SIMD register element
115 operand, e.g. the last source operand in
116 SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
117 bool
118 aarch64_ins_reglane (const aarch64_operand *self, const aarch64_opnd_info *info,
119 aarch64_insn *code, const aarch64_inst *inst,
120 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
122 /* regno */
123 insert_field (self->fields[0], code, info->reglane.regno, inst->opcode->mask);
124 /* index and/or type */
125 if (inst->opcode->iclass == asisdone || inst->opcode->iclass == asimdins)
127 int pos = info->qualifier - AARCH64_OPND_QLF_S_B;
128 if (info->type == AARCH64_OPND_En
129 && inst->opcode->operands[0] == AARCH64_OPND_Ed)
131 /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>]. */
132 assert (info->idx == 1); /* Vn */
133 aarch64_insn value = info->reglane.index << pos;
134 insert_field (FLD_imm4_11, code, value, 0);
136 else
138 /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
139 imm5<3:0> <V>
140 0000 RESERVED
141 xxx1 B
142 xx10 H
143 x100 S
144 1000 D */
145 aarch64_insn value = ((info->reglane.index << 1) | 1) << pos;
146 insert_field (FLD_imm5, code, value, 0);
149 else if (inst->opcode->iclass == dotproduct)
151 unsigned reglane_index = info->reglane.index;
152 switch (info->qualifier)
154 case AARCH64_OPND_QLF_S_4B:
155 case AARCH64_OPND_QLF_S_2H:
156 /* H:L */
157 assert (reglane_index < 4);
158 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
159 break;
160 case AARCH64_OPND_QLF_S_2B:
161 /* H:L:M */
162 assert (reglane_index < 8);
163 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
164 break;
165 default:
166 return false;
169 else if (inst->opcode->iclass == cryptosm3)
171 /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>]. */
172 unsigned reglane_index = info->reglane.index;
173 assert (reglane_index < 4);
174 insert_field (FLD_SM3_imm2, code, reglane_index, 0);
176 else
178 /* index for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
179 or SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]. */
180 unsigned reglane_index = info->reglane.index;
182 if (inst->opcode->op == OP_FCMLA_ELEM)
183 /* Complex operand takes two elements. */
184 reglane_index *= 2;
186 switch (info->qualifier)
188 case AARCH64_OPND_QLF_S_B:
189 /* H:imm3 */
190 assert (reglane_index < 16);
191 insert_fields (code, reglane_index, 0, 2, FLD_imm3_19, FLD_H);
192 break;
193 case AARCH64_OPND_QLF_S_H:
194 /* H:L:M */
195 assert (reglane_index < 8);
196 insert_fields (code, reglane_index, 0, 3, FLD_M, FLD_L, FLD_H);
197 break;
198 case AARCH64_OPND_QLF_S_S:
199 /* H:L */
200 assert (reglane_index < 4);
201 insert_fields (code, reglane_index, 0, 2, FLD_L, FLD_H);
202 break;
203 case AARCH64_OPND_QLF_S_D:
204 /* H */
205 assert (reglane_index < 2);
206 insert_field (FLD_H, code, reglane_index, 0);
207 break;
208 default:
209 return false;
212 return true;
215 /* Insert regno and len field of a register list operand, e.g. Vn in TBL. */
216 bool
217 aarch64_ins_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
218 aarch64_insn *code,
219 const aarch64_inst *inst ATTRIBUTE_UNUSED,
220 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
222 /* R */
223 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
224 /* len */
225 insert_field (FLD_len, code, info->reglist.num_regs - 1, 0);
226 return true;
229 /* Insert Rt and opcode fields for a register list operand, e.g. Vt
230 in AdvSIMD load/store instructions. */
231 bool
232 aarch64_ins_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
233 const aarch64_opnd_info *info, aarch64_insn *code,
234 const aarch64_inst *inst,
235 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
237 aarch64_insn value = 0;
238 /* Number of elements in each structure to be loaded/stored. */
239 unsigned num = get_opcode_dependent_value (inst->opcode);
241 /* Rt */
242 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
243 /* opcode */
244 switch (num)
246 case 1:
247 switch (info->reglist.num_regs)
249 case 1: value = 0x7; break;
250 case 2: value = 0xa; break;
251 case 3: value = 0x6; break;
252 case 4: value = 0x2; break;
253 default: return false;
255 break;
256 case 2:
257 value = info->reglist.num_regs == 4 ? 0x3 : 0x8;
258 break;
259 case 3:
260 value = 0x4;
261 break;
262 case 4:
263 value = 0x0;
264 break;
265 default:
266 return false;
268 insert_field (FLD_opcode, code, value, 0);
270 return true;
273 /* Insert Rt and S fields for a register list operand, e.g. Vt in AdvSIMD load
274 single structure to all lanes instructions. */
275 bool
276 aarch64_ins_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
277 const aarch64_opnd_info *info, aarch64_insn *code,
278 const aarch64_inst *inst,
279 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
281 aarch64_insn value;
282 /* The opcode dependent area stores the number of elements in
283 each structure to be loaded/stored. */
284 int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;
286 /* Rt */
287 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
288 /* S */
289 value = (aarch64_insn) 0;
290 if (is_ld1r && info->reglist.num_regs == 2)
291 /* OP_LD1R does not have alternating variant, but have "two consecutive"
292 instead. */
293 value = (aarch64_insn) 1;
294 insert_field (FLD_S, code, value, 0);
296 return true;
299 /* Insert regnos of register list operand for AdvSIMD lut instructions. */
300 bool
301 aarch64_ins_lut_reglist (const aarch64_operand *self, const aarch64_opnd_info *info,
302 aarch64_insn *code,
303 const aarch64_inst *inst ATTRIBUTE_UNUSED,
304 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
306 insert_field (self->fields[0], code, info->reglist.first_regno, 0);
307 return true;
310 /* Insert Q, opcode<2:1>, S, size and Rt fields for a register element list
311 operand e.g. Vt in AdvSIMD load/store single element instructions. */
312 bool
313 aarch64_ins_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
314 const aarch64_opnd_info *info, aarch64_insn *code,
315 const aarch64_inst *inst ATTRIBUTE_UNUSED,
316 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
318 aarch64_field field = {0, 0};
319 aarch64_insn QSsize = 0; /* fields Q:S:size. */
320 aarch64_insn opcodeh2 = 0; /* opcode<2:1> */
322 assert (info->reglist.has_index);
324 /* Rt */
325 insert_field (FLD_Rt, code, info->reglist.first_regno, 0);
326 /* Encode the index, opcode<2:1> and size. */
327 switch (info->qualifier)
329 case AARCH64_OPND_QLF_S_B:
330 /* Index encoded in "Q:S:size". */
331 QSsize = info->reglist.index;
332 opcodeh2 = 0x0;
333 break;
334 case AARCH64_OPND_QLF_S_H:
335 /* Index encoded in "Q:S:size<1>". */
336 QSsize = info->reglist.index << 1;
337 opcodeh2 = 0x1;
338 break;
339 case AARCH64_OPND_QLF_S_S:
340 /* Index encoded in "Q:S". */
341 QSsize = info->reglist.index << 2;
342 opcodeh2 = 0x2;
343 break;
344 case AARCH64_OPND_QLF_S_D:
345 /* Index encoded in "Q". */
346 QSsize = info->reglist.index << 3 | 0x1;
347 opcodeh2 = 0x2;
348 break;
349 default:
350 return false;
352 insert_fields (code, QSsize, 0, 3, FLD_vldst_size, FLD_S, FLD_Q);
353 gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
354 insert_field_2 (&field, code, opcodeh2, 0);
356 return true;
359 /* Insert fields immh:immb and/or Q for e.g. the shift immediate in
360 SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
361 or SSHR <V><d>, <V><n>, #<shift>. */
362 bool
363 aarch64_ins_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
364 const aarch64_opnd_info *info,
365 aarch64_insn *code, const aarch64_inst *inst,
366 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
368 unsigned val = aarch64_get_qualifier_standard_value (info->qualifier);
369 aarch64_insn Q, imm;
371 if (inst->opcode->iclass == asimdshf)
373 /* Q
374 immh Q <T>
375 0000 x SEE AdvSIMD modified immediate
376 0001 0 8B
377 0001 1 16B
378 001x 0 4H
379 001x 1 8H
380 01xx 0 2S
381 01xx 1 4S
382 1xxx 0 RESERVED
383 1xxx 1 2D */
384 Q = (val & 0x1) ? 1 : 0;
385 insert_field (FLD_Q, code, Q, inst->opcode->mask);
386 val >>= 1;
389 assert (info->type == AARCH64_OPND_IMM_VLSR
390 || info->type == AARCH64_OPND_IMM_VLSL);
392 if (info->type == AARCH64_OPND_IMM_VLSR)
393 /* immh:immb
394 immh <shift>
395 0000 SEE AdvSIMD modified immediate
396 0001 (16-UInt(immh:immb))
397 001x (32-UInt(immh:immb))
398 01xx (64-UInt(immh:immb))
399 1xxx (128-UInt(immh:immb)) */
400 imm = (16 << (unsigned)val) - info->imm.value;
401 else
402 /* immh:immb
403 immh <shift>
404 0000 SEE AdvSIMD modified immediate
405 0001 (UInt(immh:immb)-8)
406 001x (UInt(immh:immb)-16)
407 01xx (UInt(immh:immb)-32)
408 1xxx (UInt(immh:immb)-64) */
409 imm = info->imm.value + (8 << (unsigned)val);
410 insert_fields (code, imm, 0, 2, FLD_immb, FLD_immh);
412 return true;
415 /* Insert fields for e.g. the immediate operands in
416 BFM <Wd>, <Wn>, #<immr>, #<imms>. */
417 bool
418 aarch64_ins_imm (const aarch64_operand *self, const aarch64_opnd_info *info,
419 aarch64_insn *code,
420 const aarch64_inst *inst ATTRIBUTE_UNUSED,
421 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
423 int64_t imm;
425 imm = info->imm.value;
426 if (operand_need_shift_by_two (self))
427 imm >>= 2;
428 if (operand_need_shift_by_three (self))
429 imm >>= 3;
430 if (operand_need_shift_by_four (self))
431 imm >>= 4;
432 insert_all_fields (self, code, imm);
433 return true;
436 /* Insert immediate and its shift amount for e.g. the last operand in
437 MOVZ <Wd>, #<imm16>{, LSL #<shift>}. */
438 bool
439 aarch64_ins_imm_half (const aarch64_operand *self, const aarch64_opnd_info *info,
440 aarch64_insn *code, const aarch64_inst *inst,
441 aarch64_operand_error *errors)
443 /* imm16 */
444 aarch64_ins_imm (self, info, code, inst, errors);
445 /* hw */
446 insert_field (FLD_hw, code, info->shifter.amount >> 4, 0);
447 return true;
450 /* Insert cmode and "a:b:c:d:e:f:g:h" fields for e.g. the last operand in
451 MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}. */
452 bool
453 aarch64_ins_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
454 const aarch64_opnd_info *info,
455 aarch64_insn *code,
456 const aarch64_inst *inst ATTRIBUTE_UNUSED,
457 aarch64_operand_error *errors
458 ATTRIBUTE_UNUSED)
460 enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
461 uint64_t imm = info->imm.value;
462 enum aarch64_modifier_kind kind = info->shifter.kind;
463 int amount = info->shifter.amount;
464 aarch64_field field = {0, 0};
466 /* a:b:c:d:e:f:g:h */
467 if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
469 /* Either MOVI <Dd>, #<imm>
470 or MOVI <Vd>.2D, #<imm>.
471 <imm> is a 64-bit immediate
472 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
473 encoded in "a:b:c:d:e:f:g:h". */
474 imm = aarch64_shrink_expanded_imm8 (imm);
475 assert ((int)imm >= 0);
477 insert_fields (code, imm, 0, 2, FLD_defgh, FLD_abc);
479 if (kind == AARCH64_MOD_NONE)
480 return true;
482 /* shift amount partially in cmode */
483 assert (kind == AARCH64_MOD_LSL || kind == AARCH64_MOD_MSL);
484 if (kind == AARCH64_MOD_LSL)
486 /* AARCH64_MOD_LSL: shift zeros. */
487 int esize = aarch64_get_qualifier_esize (opnd0_qualifier);
488 assert (esize == 4 || esize == 2 || esize == 1);
489 /* For 8-bit move immediate, the optional LSL #0 does not require
490 encoding. */
491 if (esize == 1)
492 return true;
493 amount >>= 3;
494 if (esize == 4)
495 gen_sub_field (FLD_cmode, 1, 2, &field); /* per word */
496 else
497 gen_sub_field (FLD_cmode, 1, 1, &field); /* per halfword */
499 else
501 /* AARCH64_MOD_MSL: shift ones. */
502 amount >>= 4;
503 gen_sub_field (FLD_cmode, 0, 1, &field); /* per word */
505 insert_field_2 (&field, code, amount, 0);
507 return true;
510 /* Insert fields for an 8-bit floating-point immediate. */
511 bool
512 aarch64_ins_fpimm (const aarch64_operand *self, const aarch64_opnd_info *info,
513 aarch64_insn *code,
514 const aarch64_inst *inst ATTRIBUTE_UNUSED,
515 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
517 insert_all_fields (self, code, info->imm.value);
518 return true;
521 /* Insert 1-bit rotation immediate (#90 or #270). */
522 bool
523 aarch64_ins_imm_rotate1 (const aarch64_operand *self,
524 const aarch64_opnd_info *info,
525 aarch64_insn *code, const aarch64_inst *inst,
526 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
528 uint64_t rot = (info->imm.value - 90) / 180;
529 assert (rot < 2U);
530 insert_field (self->fields[0], code, rot, inst->opcode->mask);
531 return true;
534 /* Insert 2-bit rotation immediate (#0, #90, #180 or #270). */
535 bool
536 aarch64_ins_imm_rotate2 (const aarch64_operand *self,
537 const aarch64_opnd_info *info,
538 aarch64_insn *code, const aarch64_inst *inst,
539 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
541 uint64_t rot = info->imm.value / 90;
542 assert (rot < 4U);
543 insert_field (self->fields[0], code, rot, inst->opcode->mask);
544 return true;
547 /* Insert #<fbits> for the immediate operand in fp fix-point instructions,
548 e.g. SCVTF <Dd>, <Wn>, #<fbits>. */
549 bool
550 aarch64_ins_fbits (const aarch64_operand *self, const aarch64_opnd_info *info,
551 aarch64_insn *code,
552 const aarch64_inst *inst ATTRIBUTE_UNUSED,
553 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
555 insert_field (self->fields[0], code, 64 - info->imm.value, 0);
556 return true;
559 /* Insert arithmetic immediate for e.g. the last operand in
560 SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}. */
561 bool
562 aarch64_ins_aimm (const aarch64_operand *self, const aarch64_opnd_info *info,
563 aarch64_insn *code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
564 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
566 /* shift */
567 aarch64_insn value = info->shifter.amount ? 1 : 0;
568 insert_field (self->fields[0], code, value, 0);
569 /* imm12 (unsigned) */
570 insert_field (self->fields[1], code, info->imm.value, 0);
571 return true;
574 /* Common routine shared by aarch64_ins{,_inv}_limm. INVERT_P says whether
575 the operand should be inverted before encoding. */
576 static bool
577 aarch64_ins_limm_1 (const aarch64_operand *self,
578 const aarch64_opnd_info *info, aarch64_insn *code,
579 const aarch64_inst *inst, bool invert_p,
580 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
582 bool res;
583 aarch64_insn value;
584 uint64_t imm = info->imm.value;
585 int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
587 if (invert_p)
588 imm = ~imm;
589 /* The constraint check should guarantee that this will work. */
590 res = aarch64_logical_immediate_p (imm, esize, &value);
591 if (res)
592 insert_fields (code, value, 0, 3, self->fields[2], self->fields[1],
593 self->fields[0]);
594 return res;
597 /* Insert logical/bitmask immediate for e.g. the last operand in
598 ORR <Wd|WSP>, <Wn>, #<imm>. */
599 bool
600 aarch64_ins_limm (const aarch64_operand *self, const aarch64_opnd_info *info,
601 aarch64_insn *code, const aarch64_inst *inst,
602 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
604 return aarch64_ins_limm_1 (self, info, code, inst,
605 inst->opcode->op == OP_BIC, errors);
608 /* Insert a logical/bitmask immediate for the BIC alias of AND (etc.). */
609 bool
610 aarch64_ins_inv_limm (const aarch64_operand *self,
611 const aarch64_opnd_info *info, aarch64_insn *code,
612 const aarch64_inst *inst,
613 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
615 return aarch64_ins_limm_1 (self, info, code, inst, true, errors);
618 /* Encode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
619 or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>. */
620 bool
621 aarch64_ins_ft (const aarch64_operand *self, const aarch64_opnd_info *info,
622 aarch64_insn *code, const aarch64_inst *inst,
623 aarch64_operand_error *errors)
625 aarch64_insn value = 0;
627 assert (info->idx == 0);
629 /* Rt */
630 aarch64_ins_regno (self, info, code, inst, errors);
631 if (inst->opcode->iclass == ldstpair_indexed
632 || inst->opcode->iclass == ldstnapair_offs
633 || inst->opcode->iclass == ldstpair_off
634 || inst->opcode->iclass == loadlit)
636 /* size */
637 switch (info->qualifier)
639 case AARCH64_OPND_QLF_S_S: value = 0; break;
640 case AARCH64_OPND_QLF_S_D: value = 1; break;
641 case AARCH64_OPND_QLF_S_Q: value = 2; break;
642 default: return false;
644 insert_field (FLD_ldst_size, code, value, 0);
646 else
648 /* opc[1]:size */
649 value = aarch64_get_qualifier_standard_value (info->qualifier);
650 insert_fields (code, value, 0, 2, FLD_ldst_size, FLD_opc1);
653 return true;
656 /* Encode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}]. */
657 bool
658 aarch64_ins_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
659 const aarch64_opnd_info *info, aarch64_insn *code,
660 const aarch64_inst *inst ATTRIBUTE_UNUSED,
661 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
663 /* Rn */
664 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
665 return true;
668 /* Encode the address operand for e.g.
669 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
670 bool
671 aarch64_ins_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
672 const aarch64_opnd_info *info, aarch64_insn *code,
673 const aarch64_inst *inst ATTRIBUTE_UNUSED,
674 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
676 aarch64_insn S;
677 enum aarch64_modifier_kind kind = info->shifter.kind;
679 /* Rn */
680 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
681 /* Rm */
682 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
683 /* option */
684 if (kind == AARCH64_MOD_LSL)
685 kind = AARCH64_MOD_UXTX; /* Trick to enable the table-driven. */
686 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
687 /* S */
688 if (info->qualifier != AARCH64_OPND_QLF_S_B)
689 S = info->shifter.amount != 0;
690 else
691 /* For STR <Bt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}},
692 S <amount>
693 0 [absent]
694 1 #0
695 Must be #0 if <extend> is explicitly LSL. */
696 S = info->shifter.operator_present && info->shifter.amount_present;
697 insert_field (FLD_S, code, S, 0);
699 return true;
702 /* Encode the address operand for e.g.
703 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
704 bool
705 aarch64_ins_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
706 const aarch64_opnd_info *info, aarch64_insn *code,
707 const aarch64_inst *inst ATTRIBUTE_UNUSED,
708 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
710 /* Rn */
711 insert_field (self->fields[0], code, info->addr.base_regno, 0);
713 /* simm9 */
714 int imm = info->addr.offset.imm;
715 insert_field (self->fields[1], code, imm, 0);
717 /* writeback */
718 if (info->addr.writeback)
720 assert (info->addr.preind == 1 && info->addr.postind == 0);
721 insert_field (self->fields[2], code, 1, 0);
723 return true;
726 /* Encode the address operand for e.g.
727 stlur <Xt>, [<Xn|SP>{, <amount>}]. */
728 bool
729 aarch64_ins_rcpc3_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
730 const aarch64_opnd_info *info, aarch64_insn *code,
731 const aarch64_inst *inst ATTRIBUTE_UNUSED,
732 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
734 /* Rn */
735 insert_field (self->fields[0], code, info->addr.base_regno, 0);
737 /* simm9 */
738 int imm = info->addr.offset.imm;
739 insert_field (self->fields[1], code, imm, 0);
741 return true;
744 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>, #<simm>]!. */
745 bool
746 aarch64_ins_addr_simm (const aarch64_operand *self,
747 const aarch64_opnd_info *info,
748 aarch64_insn *code,
749 const aarch64_inst *inst ATTRIBUTE_UNUSED,
750 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
752 int imm;
754 /* Rn */
755 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
756 /* simm (imm9 or imm7) */
757 imm = info->addr.offset.imm;
758 if (self->fields[0] == FLD_imm7
759 || info->qualifier == AARCH64_OPND_QLF_imm_tag)
760 /* scaled immediate in ld/st pair instructions.. */
761 imm >>= get_logsz (aarch64_get_qualifier_esize (info->qualifier));
762 insert_field (self->fields[0], code, imm, 0);
763 /* pre/post- index */
764 if (info->addr.writeback)
766 assert (inst->opcode->iclass != ldst_unscaled
767 && inst->opcode->iclass != ldstnapair_offs
768 && inst->opcode->iclass != ldstpair_off
769 && inst->opcode->iclass != ldst_unpriv);
770 assert (info->addr.preind != info->addr.postind);
771 if (info->addr.preind)
772 insert_field (self->fields[1], code, 1, 0);
775 return true;
778 /* Encode the address operand, potentially offset by the load/store ammount,
779 e.g. LDIAPP <Xt>, <Xt2> [<Xn|SP>, #<simm>]
780 and STILP <Xt>, <Xt2> [<Xn|SP>], #<simm>.*/
781 bool
782 aarch64_ins_rcpc3_addr_opt_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
783 const aarch64_opnd_info *info,
784 aarch64_insn *code,
785 const aarch64_inst *inst ATTRIBUTE_UNUSED,
786 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
788 int imm;
790 /* Rn */
791 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
792 /* simm */
793 imm = info->addr.offset.imm;
794 if (!imm)
795 insert_field (FLD_opc2, code, 1, 0);
797 return true;
800 /* Encode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}]. */
801 bool
802 aarch64_ins_addr_simm10 (const aarch64_operand *self,
803 const aarch64_opnd_info *info,
804 aarch64_insn *code,
805 const aarch64_inst *inst ATTRIBUTE_UNUSED,
806 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
808 int imm;
810 /* Rn */
811 insert_field (self->fields[0], code, info->addr.base_regno, 0);
812 /* simm10 */
813 imm = info->addr.offset.imm >> 3;
814 insert_field (self->fields[1], code, imm >> 9, 0);
815 insert_field (self->fields[2], code, imm, 0);
816 /* writeback */
817 if (info->addr.writeback)
819 assert (info->addr.preind == 1 && info->addr.postind == 0);
820 insert_field (self->fields[3], code, 1, 0);
822 return true;
825 /* Encode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<pimm>}]. */
826 bool
827 aarch64_ins_addr_uimm12 (const aarch64_operand *self,
828 const aarch64_opnd_info *info,
829 aarch64_insn *code,
830 const aarch64_inst *inst ATTRIBUTE_UNUSED,
831 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
833 int shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
835 /* Rn */
836 insert_field (self->fields[0], code, info->addr.base_regno, 0);
837 /* uimm12 */
838 insert_field (self->fields[1], code,info->addr.offset.imm >> shift, 0);
839 return true;
842 /* Encode the address operand for e.g.
843 LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>. */
844 bool
845 aarch64_ins_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
846 const aarch64_opnd_info *info, aarch64_insn *code,
847 const aarch64_inst *inst ATTRIBUTE_UNUSED,
848 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
850 /* Rn */
851 insert_field (FLD_Rn, code, info->addr.base_regno, 0);
852 /* Rm | #<amount> */
853 if (info->addr.offset.is_reg)
854 insert_field (FLD_Rm, code, info->addr.offset.regno, 0);
855 else
856 insert_field (FLD_Rm, code, 0x1f, 0);
857 return true;
860 /* Encode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>. */
861 bool
862 aarch64_ins_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
863 const aarch64_opnd_info *info, aarch64_insn *code,
864 const aarch64_inst *inst ATTRIBUTE_UNUSED,
865 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
867 /* cond */
868 insert_field (FLD_cond, code, info->cond->value, 0);
869 return true;
872 /* Encode the system register operand for e.g. MRS <Xt>, <systemreg>. */
873 bool
874 aarch64_ins_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
875 const aarch64_opnd_info *info, aarch64_insn *code,
876 const aarch64_inst *inst,
877 aarch64_operand_error *detail ATTRIBUTE_UNUSED)
879 /* If a system instruction check if we have any restrictions on which
880 registers it can use. */
881 if (inst->opcode->iclass == ic_system)
883 uint64_t opcode_flags
884 = inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE);
885 uint32_t sysreg_flags
886 = info->sysreg.flags & (F_REG_READ | F_REG_WRITE);
888 /* Check to see if it's read-only, else check if it's write only.
889 if it's both or unspecified don't care. */
890 if (opcode_flags == F_SYS_READ
891 && sysreg_flags
892 && sysreg_flags != F_REG_READ)
894 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
895 detail->error = _("specified register cannot be read from");
896 detail->index = info->idx;
897 detail->non_fatal = true;
899 else if (opcode_flags == F_SYS_WRITE
900 && sysreg_flags
901 && sysreg_flags != F_REG_WRITE)
903 detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
904 detail->error = _("specified register cannot be written to");
905 detail->index = info->idx;
906 detail->non_fatal = true;
909 /* op0:op1:CRn:CRm:op2 */
910 insert_fields (code, info->sysreg.value, inst->opcode->mask, 5,
911 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1, FLD_op0);
912 return true;
915 /* Encode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>. */
916 bool
917 aarch64_ins_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
918 const aarch64_opnd_info *info, aarch64_insn *code,
919 const aarch64_inst *inst ATTRIBUTE_UNUSED,
920 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
922 /* op1:op2 */
923 insert_fields (code, info->pstatefield, inst->opcode->mask, 2,
924 FLD_op2, FLD_op1);
926 /* Extra CRm mask. */
927 if (info->sysreg.flags | F_REG_IN_CRM)
928 insert_field (FLD_CRm, code, PSTATE_DECODE_CRM (info->sysreg.flags), 0);
929 return true;
932 /* Encode the system instruction op operand for e.g. AT <at_op>, <Xt>. */
933 bool
934 aarch64_ins_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
935 const aarch64_opnd_info *info, aarch64_insn *code,
936 const aarch64_inst *inst ATTRIBUTE_UNUSED,
937 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
939 /* op1:CRn:CRm:op2 */
940 insert_fields (code, info->sysins_op->value, inst->opcode->mask, 4,
941 FLD_op2, FLD_CRm, FLD_CRn, FLD_op1);
942 return true;
945 /* Encode the memory barrier option operand for e.g. DMB <option>|#<imm>. */
947 bool
948 aarch64_ins_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
949 const aarch64_opnd_info *info, aarch64_insn *code,
950 const aarch64_inst *inst ATTRIBUTE_UNUSED,
951 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
953 /* CRm */
954 insert_field (FLD_CRm, code, info->barrier->value, 0);
955 return true;
958 /* Encode the memory barrier option operand for DSB <option>nXS|#<imm>. */
960 bool
961 aarch64_ins_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
962 const aarch64_opnd_info *info, aarch64_insn *code,
963 const aarch64_inst *inst ATTRIBUTE_UNUSED,
964 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
966 /* For the DSB nXS barrier variant: is a 5-bit unsigned immediate,
967 encoded in CRm<3:2>. */
968 aarch64_insn value = (info->barrier->value >> 2) - 4;
969 insert_field (FLD_CRm_dsb_nxs, code, value, 0);
970 return true;
973 /* Encode the prefetch operation option operand for e.g.
974 PRFM <prfop>, [<Xn|SP>{, #<pimm>}]. */
976 bool
977 aarch64_ins_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
978 const aarch64_opnd_info *info, aarch64_insn *code,
979 const aarch64_inst *inst ATTRIBUTE_UNUSED,
980 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
982 /* prfop in Rt */
983 insert_field (FLD_Rt, code, info->prfop->value, 0);
984 return true;
987 /* Encode the hint number for instructions that alias HINT but take an
988 operand. */
990 bool
991 aarch64_ins_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
992 const aarch64_opnd_info *info, aarch64_insn *code,
993 const aarch64_inst *inst ATTRIBUTE_UNUSED,
994 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
996 /* CRm:op2. */
997 insert_fields (code, info->hint_option->value, 0, 2, FLD_op2, FLD_CRm);
998 return true;
1001 /* Encode the extended register operand for e.g.
1002 STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
1003 bool
1004 aarch64_ins_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1005 const aarch64_opnd_info *info, aarch64_insn *code,
1006 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1007 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1009 enum aarch64_modifier_kind kind;
1011 /* Rm */
1012 insert_field (FLD_Rm, code, info->reg.regno, 0);
1013 /* option */
1014 kind = info->shifter.kind;
1015 if (kind == AARCH64_MOD_LSL)
1016 kind = info->qualifier == AARCH64_OPND_QLF_W
1017 ? AARCH64_MOD_UXTW : AARCH64_MOD_UXTX;
1018 insert_field (FLD_option, code, aarch64_get_operand_modifier_value (kind), 0);
1019 /* imm3 */
1020 insert_field (FLD_imm3_10, code, info->shifter.amount, 0);
1022 return true;
/* Encode the shifted register operand for e.g.
     SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
bool
aarch64_ins_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* shift: the shift kind (LSL/LSR/ASR/ROR) as its standard encoding.  */
  insert_field (FLD_shift, code,
		aarch64_get_operand_modifier_value (info->shifter.kind), 0);
  /* imm6: the shift amount.  */
  insert_field (FLD_imm6_10, code, info->shifter.amount, 0);

  return true;
}
/* Encode the LSL-shifted register operand for e.g.
     ADDPT <Xd|SP>, <Xn|SP>, <Xm>{, LSL #<amount>}.  */
bool
aarch64_ins_reg_lsl_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rm */
  insert_field (FLD_Rm, code, info->reg.regno, 0);
  /* imm3: only the amount is encoded; the shift kind is fixed at LSL.  */
  insert_field (FLD_imm3_10, code, info->shifter.amount, 0);
  return true;
}
/* Encode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
   where <simm4> is a 4-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
bool
aarch64_ins_sve_addr_ri_s4xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The stored offset is a multiple of FACTOR; divide it back down to
     the raw 4-bit immediate.  */
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
   where <simm6> is a 6-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
bool
aarch64_ins_sve_addr_ri_s6xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Scale the byte offset back down to the raw 6-bit immediate.  */
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
   where <simm9> is a 9-bit signed value and where <factor> is 1 plus
   SELF's operand-dependent value.  fields[0] specifies the field that
   holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
   and imm3 fields, with imm3 being the less-significant part.  */
bool
aarch64_ins_sve_addr_ri_s9xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int factor = 1 + get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  /* Least-significant field (imm3) listed first, per insert_fields.  */
  insert_fields (code, info->addr.offset.imm / factor, 0,
		 2, FLD_imm3_10, FLD_SVE_imm6);
  return true;
}
/* Encode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
   is a 4-bit signed number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_s4 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Here the scale factor is a power of two (offset = imm << shift).  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm4, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
   is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
   value.  fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_ri_u6 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* offset = imm << shift, so divide by the power-of-two factor.  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_SVE_imm6, code, info->addr.offset.imm / factor, 0);
  return true;
}
1142 /* Encode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1143 is SELF's operand-dependent value. fields[0] specifies the base
1144 register field and fields[1] specifies the offset register field. */
1145 bool
1146 aarch64_ins_sve_addr_rr_lsl (const aarch64_operand *self,
1147 const aarch64_opnd_info *info, aarch64_insn *code,
1148 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1149 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1151 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1152 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1153 return true;
1156 /* Encode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1157 <shift> is SELF's operand-dependent value. fields[0] specifies the
1158 base register field, fields[1] specifies the offset register field and
1159 fields[2] is a single-bit field that selects SXTW over UXTW. */
1160 bool
1161 aarch64_ins_sve_addr_rz_xtw (const aarch64_operand *self,
1162 const aarch64_opnd_info *info, aarch64_insn *code,
1163 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1164 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1166 insert_field (self->fields[0], code, info->addr.base_regno, 0);
1167 insert_field (self->fields[1], code, info->addr.offset.regno, 0);
1168 if (info->shifter.kind == AARCH64_MOD_UXTW)
1169 insert_field (self->fields[2], code, 0, 0);
1170 else
1171 insert_field (self->fields[2], code, 1, 0);
1172 return true;
/* Encode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
   5-bit unsigned number and where <shift> is SELF's operand-dependent value.
   fields[0] specifies the base register field.  */
bool
aarch64_ins_sve_addr_zi_u5 (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* offset = imm5 << shift, so divide by the power-of-two factor.  */
  int factor = 1 << get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (FLD_imm5, code, info->addr.offset.imm / factor, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
   where <modifier> is fixed by the instruction and where <msz> is a
   2-bit unsigned number.  fields[0] specifies the base register field
   and fields[1] specifies the offset register field.

   NOTE(review): despite the "_ext_" name this helper performs insertion
   (it writes fields into *CODE); it is shared by the aarch64_ins_sve_
   addr_zz_* wrappers below.  */
static bool
aarch64_ext_sve_addr_zz (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_field (self->fields[0], code, info->addr.base_regno, 0);
  insert_field (self->fields[1], code, info->addr.offset.regno, 0);
  insert_field (FLD_SVE_msz, code, info->shifter.amount, 0);
  return true;
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_lsl (const aarch64_operand *self,
			     const aarch64_opnd_info *info, aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors)
{
  /* The LSL/SXTW/UXTW distinction is fixed by the opcode, so all three
     variants share the same insertion helper.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_sxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* SXTW is implied by the opcode; shared helper does the insertion.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
   <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
   field and fields[1] specifies the offset register field.  */
bool
aarch64_ins_sve_addr_zz_uxtw (const aarch64_operand *self,
			      const aarch64_opnd_info *info,
			      aarch64_insn *code,
			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
			      aarch64_operand_error *errors)
{
  /* UXTW is implied by the opcode; shared helper does the insertion.  */
  return aarch64_ext_sve_addr_zz (self, info, code, errors);
}
/* Encode an SVE ADD/SUB immediate.  */
bool
aarch64_ins_sve_aimm (const aarch64_operand *self,
		      const aarch64_opnd_info *info, aarch64_insn *code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The encoded value is sh:imm8: bit 8 (the 256 below) is the "LSL #8"
     flag and the low eight bits are the immediate.  */
  if (info->shifter.amount == 8)
    /* Explicitly written as "#<imm>, LSL #8".  */
    insert_all_fields (self, code, (info->imm.value & 0xff) | 256);
  else if (info->imm.value != 0 && (info->imm.value & 0xff) == 0)
    /* An unshifted nonzero multiple of 256: encode via the shifted form.  */
    insert_all_fields (self, code, ((info->imm.value / 256) & 0xff) | 256);
  else
    insert_all_fields (self, code, info->imm.value & 0xff);
  return true;
}
/* Encode a register list whose first register must be a multiple of the
   list length; only first_regno / num_regs is stored in fields[0].  */
bool
aarch64_ins_sve_aligned_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  unsigned int val = info->reglist.first_regno;
  insert_field (self->fields[0], code, val / num_regs, 0);
  return true;
}
/* Encode an SVE CPY/DUP immediate.  */
bool
aarch64_ins_sve_asimm (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst,
		       aarch64_operand_error *errors)
{
  /* Same sh:imm8 layout as the ADD/SUB immediate.  */
  return aarch64_ins_sve_aimm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
   array specifies which field to use for Zn.  MM is encoded in the
   concatenation of imm5 and SVE_tszh, with imm5 being the less
   significant part.  */
bool
aarch64_ins_sve_index (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int esize = aarch64_get_qualifier_esize (info->qualifier);
  insert_field (self->fields[0], code, info->reglane.regno, 0);
  /* Triangular encoding: (index * 2 + 1) * esize places a 1 bit whose
     position identifies the element size, with the index above it.  */
  insert_all_fields_after (self, 1, code,
			   (info->reglane.index * 2 + 1) * esize);
  return true;
}
/* Encode a logical/bitmask immediate for the MOV alias of SVE DUPM.  */
bool
aarch64_ins_sve_limm_mov (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors)
{
  /* Uses the standard logical-immediate encoder.  */
  return aarch64_ins_limm (self, info, code, inst, errors);
}
/* Encode Zn[MM], where Zn occupies the least-significant part of the field
   and where MM occupies the most-significant part.  The operand-dependent
   value specifies the number of bits in Zn.  */
bool
aarch64_ins_sve_quad_index (const aarch64_operand *self,
			    const aarch64_opnd_info *info, aarch64_insn *code,
			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  unsigned int reg_bits = get_operand_specific_data (self);
  assert (info->reglane.regno < (1U << reg_bits));
  /* Pack index above the register number: index:regno.  */
  unsigned int val = (info->reglane.index << reg_bits) + info->reglane.regno;
  insert_all_fields (self, code, val);
  return true;
}
/* Encode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
   to use for Zn.  */
bool
aarch64_ins_sve_reglist (const aarch64_operand *self,
			 const aarch64_opnd_info *info, aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the first register is encoded; the list length is implied by
     the opcode.  */
  insert_field (self->fields[0], code, info->reglist.first_regno, 0);
  return true;
}
/* Encode a strided register list.  The first field holds the top bit
   (0 or 16) and the second field holds the lower bits.  The stride is
   16 divided by the list length.  */
bool
aarch64_ins_sve_strided_reglist (const aarch64_operand *self,
				 const aarch64_opnd_info *info,
				 aarch64_insn *code,
				 const aarch64_inst *inst ATTRIBUTE_UNUSED,
				 aarch64_operand_error *errors
				   ATTRIBUTE_UNUSED)
{
  unsigned int num_regs = get_operand_specific_data (self);
  /* Valid first registers have only the top bit and the low
     (16 / num_regs - 1) bits set; MASK is only read by the assert,
     hence ATTRIBUTE_UNUSED for NDEBUG builds.  */
  unsigned int mask ATTRIBUTE_UNUSED = 16 | (16 / num_regs - 1);
  unsigned int val = info->reglist.first_regno;
  assert ((val & mask) == val);
  insert_field (self->fields[0], code, val >> 4, 0);
  insert_field (self->fields[1], code, val & 15, 0);
  return true;
}
/* Encode <pattern>{, MUL #<amount>}.  The fields array specifies which
   fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
   field.  */
bool
aarch64_ins_sve_scale (const aarch64_operand *self,
		       const aarch64_opnd_info *info, aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  insert_all_fields (self, code, info->imm.value);
  /* The multiplier is biased by one so that MUL #1..#16 fits in 4 bits.  */
  insert_field (FLD_SVE_imm4, code, info->shifter.amount - 1, 0);
  return true;
}
/* Encode an SVE shift left immediate.  */
bool
aarch64_ins_sve_shlimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The element size is taken from the preceding operand's qualifier,
     so this operand can never be first.  */
  assert (info->idx > 0);
  prev_operand = &inst->operands[info->idx - 1];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* Left shifts encode as (8 * esize) + amount.  */
  insert_all_fields (self, code, 8 * esize + info->imm.value);
  return true;
}
/* Encode an SVE shift right immediate.  */
bool
aarch64_ins_sve_shrimm (const aarch64_operand *self,
			const aarch64_opnd_info *info, aarch64_insn *code,
			const aarch64_inst *inst,
			aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  const aarch64_opnd_info *prev_operand;
  unsigned int esize;

  /* The operand-specific data says how many operands back to look for
     the qualifier that fixes the element size.  */
  unsigned int opnd_backshift = get_operand_specific_data (self);
  assert (info->idx >= (int)opnd_backshift);
  prev_operand = &inst->operands[info->idx - opnd_backshift];
  esize = aarch64_get_qualifier_esize (prev_operand->qualifier);
  /* Right shifts encode as (16 * esize) - amount.  */
  insert_all_fields (self, code, 16 * esize - info->imm.value);
  return true;
}
1406 /* Encode a single-bit immediate that selects between #0.5 and #1.0.
1407 The fields array specifies which field to use. */
1408 bool
1409 aarch64_ins_sve_float_half_one (const aarch64_operand *self,
1410 const aarch64_opnd_info *info,
1411 aarch64_insn *code,
1412 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1413 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1415 if (info->imm.value == 0x3f000000)
1416 insert_field (self->fields[0], code, 0, 0);
1417 else
1418 insert_field (self->fields[0], code, 1, 0);
1419 return true;
1422 /* Encode a single-bit immediate that selects between #0.5 and #2.0.
1423 The fields array specifies which field to use. */
1424 bool
1425 aarch64_ins_sve_float_half_two (const aarch64_operand *self,
1426 const aarch64_opnd_info *info,
1427 aarch64_insn *code,
1428 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1429 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1431 if (info->imm.value == 0x3f000000)
1432 insert_field (self->fields[0], code, 0, 0);
1433 else
1434 insert_field (self->fields[0], code, 1, 0);
1435 return true;
1438 /* Encode a single-bit immediate that selects between #0.0 and #1.0.
1439 The fields array specifies which field to use. */
1440 bool
1441 aarch64_ins_sve_float_zero_one (const aarch64_operand *self,
1442 const aarch64_opnd_info *info,
1443 aarch64_insn *code,
1444 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1445 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1447 if (info->imm.value == 0)
1448 insert_field (self->fields[0], code, 0, 0);
1449 else
1450 insert_field (self->fields[0], code, 1, 0);
1451 return true;
/* Encode a ZA slice-range operand: the V (vertical/horizontal) bit, a
   2-bit slice-index base register and an immediate scaled down by the
   slice count (countm1 + 1).  Which fields carry the ZA register number
   and the immediate depends on the element-size qualifier.  */
bool
aarch64_ins_sme_za_vrs1 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Only the low two bits of the index register are encoded
     (presumably selecting among W12-W15 — see the -12 bias used by the
     tile encoders below; TODO confirm).  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Byte elements: no ZA register bits, only the scaled immediate.  */
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
    case AARCH64_OPND_QLF_S_S:
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_D:
      /* Doubleword elements: ZA register only, no immediate field.  */
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
/* Like aarch64_ins_sme_za_vrs1, but with a different per-qualifier field
   split: here S-sized elements take the D-style layout (ZA register only)
   rather than the H-style layout (ZA register plus immediate).  */
bool
aarch64_ins_sme_za_vrs2 (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int za_reg = info->indexed_za.regno;
  /* Only the low two bits of the index register are encoded.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int v = info->indexed_za.v;
  int countm1 = info->indexed_za.index.countm1;

  insert_field (self->fields[0], code, v, 0);
  insert_field (self->fields[1], code, regno, 0);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      insert_field (self->fields[2], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_H:
      insert_field (self->fields[2], code, za_reg, 0);
      insert_field (self->fields[3], code, imm / (countm1 + 1), 0);
      break;
    case AARCH64_OPND_QLF_S_S:
    case AARCH64_OPND_QLF_S_D:
      insert_field (self->fields[2], code, za_reg, 0);
      break;
    default:
      return false;
    }

  return true;
}
/* Encode in SME instruction such as MOVZA ZA tile slice to vector.
   The ZA tile number and slice immediate share a fixed field that shrinks
   as the element size grows (more tile bits, fewer immediate bits).  */
bool
aarch64_ins_sme_za_tile_to_vec (const aarch64_operand *self,
				const aarch64_opnd_info *info,
				aarch64_insn *code,
				const aarch64_inst *inst ATTRIBUTE_UNUSED,
				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_v = info->indexed_za.v;
  /* The slice index register is one of W12-W15, stored as a 2-bit bias.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* One byte tile (ZA0): 4-bit slice immediate, no tile bits.  */
      insert_field (FLD_imm4_5, code, fld_zan_imm, 0);
      break;
    case AARCH64_OPND_QLF_S_H:
      insert_field (FLD_ZA8_1, code, regno, 0);
      insert_field (FLD_imm3_5, code, fld_zan_imm, 0);
      break;
    case AARCH64_OPND_QLF_S_S:
      insert_field (FLD_ZA7_2, code, regno, 0);
      insert_field (FLD_off2, code, fld_zan_imm, 0);
      break;
    case AARCH64_OPND_QLF_S_D:
      insert_field (FLD_ZA6_3, code, regno, 0);
      insert_field (FLD_ol, code, fld_zan_imm, 0);
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Quadword tiles: tile number only, no slice immediate bits.  */
      insert_field (FLD_ZA5_4, code, regno, 0);
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_v, 0);
  insert_field (self->fields[1], code, fld_rv, 0);

  return true;
}
/* Encode in SME instruction such as MOVA ZA tile vector register number,
   vector indicator, vector selector and immediate.  The tile number is
   folded into the top bits of the shared zan_imm field, leaving fewer
   immediate bits as the element size grows.  */
bool
aarch64_ins_sme_za_hv_tiles (const aarch64_operand *self,
			     const aarch64_opnd_info *info,
			     aarch64_insn *code,
			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_size;
  int fld_q;
  int fld_v = info->indexed_za.v;
  /* Slice index register W12-W15, stored as a 2-bit bias from W12.  */
  int fld_rv = info->indexed_za.index.regno - 12;
  int fld_zan_imm = info->indexed_za.index.imm;
  int regno = info->indexed_za.regno;

  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* Single byte tile: the whole field is the slice immediate.  */
      fld_size = 0;
      fld_q = 0;
      break;
    case AARCH64_OPND_QLF_S_H:
      fld_size = 1;
      fld_q = 0;
      fld_zan_imm |= regno << 3;
      break;
    case AARCH64_OPND_QLF_S_S:
      fld_size = 2;
      fld_q = 0;
      fld_zan_imm |= regno << 2;
      break;
    case AARCH64_OPND_QLF_S_D:
      fld_size = 3;
      fld_q = 0;
      fld_zan_imm |= regno << 1;
      break;
    case AARCH64_OPND_QLF_S_Q:
      /* Quadword tiles reuse size == 3 with the Q bit set; the field is
	 entirely the tile number.  */
      fld_size = 3;
      fld_q = 1;
      fld_zan_imm = regno;
      break;
    default:
      return false;
    }

  insert_field (self->fields[0], code, fld_size, 0);
  insert_field (self->fields[1], code, fld_q, 0);
  insert_field (self->fields[2], code, fld_v, 0);
  insert_field (self->fields[3], code, fld_rv, 0);
  insert_field (self->fields[4], code, fld_zan_imm, 0);

  return true;
}
1622 bool
1623 aarch64_ins_sme_za_hv_tiles_range (const aarch64_operand *self,
1624 const aarch64_opnd_info *info,
1625 aarch64_insn *code,
1626 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1627 aarch64_operand_error *errors
1628 ATTRIBUTE_UNUSED)
1630 int ebytes = aarch64_get_qualifier_esize (info->qualifier);
1631 int range_size = get_opcode_dependent_value (inst->opcode);
1632 int fld_v = info->indexed_za.v;
1633 int fld_rv = info->indexed_za.index.regno - 12;
1634 int imm = info->indexed_za.index.imm;
1635 int max_value = 16 / range_size / ebytes;
1637 if (max_value == 0)
1638 max_value = 1;
1640 assert (imm % range_size == 0 && (imm / range_size) < max_value);
1641 int fld_zan_imm = (info->indexed_za.regno * max_value) | (imm / range_size);
1642 assert (fld_zan_imm < (range_size == 4 && ebytes < 8 ? 4 : 8));
1644 insert_field (self->fields[0], code, fld_v, 0);
1645 insert_field (self->fields[1], code, fld_rv, 0);
1646 insert_field (self->fields[2], code, fld_zan_imm, 0);
1648 return true;
/* Encode in SME instruction ZERO list of up to eight 64-bit element tile
   names separated by commas, encoded in the "imm8" field.

   For programmer convenience an assembler must also accept the names of
   32-bit, 16-bit and 8-bit element tiles which are converted into the
   corresponding set of 64-bit element tiles.  */
bool
aarch64_ins_sme_za_list (const aarch64_operand *self,
			 const aarch64_opnd_info *info,
			 aarch64_insn *code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The parsed tile list has already been reduced to a bit mask.  */
  int fld_mask = info->imm.value;
  insert_field (self->fields[0], code, fld_mask, 0);
  return true;
}
/* Encode a ZA array operand: a 2-bit slice-index register and an
   immediate scaled down by the slice count (countm1 + 1).  */
bool
aarch64_ins_sme_za_array (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Only the low two bits of the index register are encoded.  */
  int regno = info->indexed_za.index.regno & 3;
  int imm = info->indexed_za.index.imm;
  int countm1 = info->indexed_za.index.countm1;
  /* The immediate must be an exact multiple of the slice count.  */
  assert (imm % (countm1 + 1) == 0);
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm / (countm1 + 1), 0);
  return true;
}
/* Encode an SME address [<base>, #<imm>, MUL VL]: base register in
   fields[0], unscaled immediate in fields[1].  */
bool
aarch64_ins_sme_addr_ri_u4xvl (const aarch64_operand *self,
			       const aarch64_opnd_info *info,
			       aarch64_insn *code,
			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int regno = info->addr.base_regno;
  int imm = info->addr.offset.imm;
  insert_field (self->fields[0], code, regno, 0);
  insert_field (self->fields[1], code, imm, 0);
  return true;
}
/* Encode in SMSTART and SMSTOP {SM | ZA } mode.  */
bool
aarch64_ins_sme_sm_za (const aarch64_operand *self,
		       const aarch64_opnd_info *info,
		       aarch64_insn *code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn fld_crm;
  /* Set CRm[3:1] bits.  Note: reg.regno is overloaded here to carry the
     parsed option character ('s' for SM, 'z' for ZA), not a register
     number.  */
  if (info->reg.regno == 's')
    fld_crm = 0x02 ; /* SVCRSM.  */
  else if (info->reg.regno == 'z')
    fld_crm = 0x04; /* SVCRZA.  */
  else
    return false;

  insert_field (self->fields[0], code, fld_crm, 0);
  return true;
}
/* Encode source scalable predicate register (Pn), name of the index base
   register W12-W15 (Rm), and optional element index, defaulting to 0, in
   the range 0 to one less than the number of vector elements in a 128-bit
   vector register, encoded in "i1:tszh:tszl".  */
bool
aarch64_ins_sme_pred_reg_with_index (const aarch64_operand *self,
				     const aarch64_opnd_info *info,
				     aarch64_insn *code,
				     const aarch64_inst *inst ATTRIBUTE_UNUSED,
				     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int fld_pn = info->indexed_za.regno;
  /* Index base register W12-W15, stored as a 2-bit bias from W12.  */
  int fld_rm = info->indexed_za.index.regno - 12;
  int imm = info->indexed_za.index.imm;
  int fld_i1, fld_tszh, fld_tshl;

  insert_field (self->fields[0], code, fld_rm, 0);
  insert_field (self->fields[1], code, fld_pn, 0);

  /* Optional element index, defaulting to 0, in the range 0 to one less
     than the number of vector elements in a 128-bit vector register,
     encoded in "i1:tszh:tszl".  The low set bit of tszl identifies the
     element size, with the index bits packed above it:

       i1  tszh  tszl  <T>
       0   0     000   RESERVED
       x   x     xx1   B
       x   x     x10   H
       x   x     100   S
       x   1     000   D  */
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_S_B:
      /* <imm> is 4 bit value.  */
      fld_i1 = (imm >> 3) & 0x1;
      fld_tszh = (imm >> 2) & 0x1;
      fld_tshl = ((imm << 1) | 0x1) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_H:
      /* <imm> is 3 bit value.  */
      fld_i1 = (imm >> 2) & 0x1;
      fld_tszh = (imm >> 1) & 0x1;
      fld_tshl = ((imm << 2) | 0x2) & 0x7;
      break;
    case AARCH64_OPND_QLF_S_S:
      /* <imm> is 2 bit value.  */
      fld_i1 = (imm >> 1) & 0x1;
      fld_tszh = imm & 0x1;
      fld_tshl = 0x4;
      break;
    case AARCH64_OPND_QLF_S_D:
      /* <imm> is 1 bit value.  */
      fld_i1 = imm & 0x1;
      fld_tszh = 0x1;
      fld_tshl = 0x0;
      break;
    default:
      return false;
    }

  insert_field (self->fields[2], code, fld_i1, 0);
  insert_field (self->fields[3], code, fld_tszh, 0);
  insert_field (self->fields[4], code, fld_tshl, 0);
  return true;
}
1788 /* Insert X0-X30. Register 31 is unallocated. */
1789 bool
1790 aarch64_ins_x0_to_x30 (const aarch64_operand *self,
1791 const aarch64_opnd_info *info,
1792 aarch64_insn *code,
1793 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1794 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1796 assert (info->reg.regno <= 30);
1797 insert_field (self->fields[0], code, info->reg.regno, 0);
1798 return true;
/* Insert an indexed register, with the first field being the register
   number and the remaining fields being the index.  */
bool
aarch64_ins_simple_index (const aarch64_operand *self,
			  const aarch64_opnd_info *info,
			  aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The operand-specific data gives the register-number bias (the lowest
     register the operand can name).  */
  int bias = get_operand_specific_data (self);
  insert_field (self->fields[0], code, info->reglane.regno - bias, 0);
  insert_all_fields_after (self, 1, code, info->reglane.index);
  return true;
}
/* Insert a plain shift-right immediate, when there is only a single
   element size.  */
bool
aarch64_ins_plain_shrimm (const aarch64_operand *self,
			  const aarch64_opnd_info *info, aarch64_insn *code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Right shifts are stored bias-inverted: encoded = 2^width - amount.  */
  unsigned int base = 1 << get_operand_field_width (self, 0);
  insert_field (self->fields[0], code, base - info->imm.value, 0);
  return true;
}
/* Miscellaneous encoding functions.  */

/* Encode size[0], i.e. bit 22, for
   e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */

static void
encode_asimd_fcvt (aarch64_inst *inst)
{
  aarch64_insn value;
  aarch64_field field = {0, 0};
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_NIL;

  switch (inst->opcode->op)
    {
    case OP_FCVTN:
    case OP_FCVTN2:
      /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>: the source holds the wide form.  */
      qualifier = inst->operands[1].qualifier;
      break;
    case OP_FCVTL:
    case OP_FCVTL2:
      /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>: the destination holds the wide
	 form.  */
      qualifier = inst->operands[0].qualifier;
      break;
    default:
      return;
    }
  assert (qualifier == AARCH64_OPND_QLF_V_4S
	  || qualifier == AARCH64_OPND_QLF_V_2D);
  /* 4S -> size[0] = 0; 2D -> size[0] = 1.  */
  value = (qualifier == AARCH64_OPND_QLF_V_4S) ? 0 : 1;
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, value, 0);
}
/* Encode size[0], i.e. bit 22, for
   e.g. FCVTXN <Vb><d>, <Va><n>.  */

static void
encode_asisd_fcvtxn (aarch64_inst *inst)
{
  /* FCVTXN only converts D -> S, so size[0] is always 1.  */
  aarch64_insn val = 1;
  aarch64_field field = {0, 0};
  assert (inst->operands[0].qualifier == AARCH64_OPND_QLF_S_S);
  gen_sub_field (FLD_size, 0, 1, &field);
  insert_field_2 (&field, &inst->value, val, 0);
}
/* Encode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
static void
encode_fcvt (aarch64_inst *inst)
{
  aarch64_insn val;
  /* opc occupies bits [16:15] (lsb 15, width 2).  */
  const aarch64_field field = {15, 2};

  /* opc dstsize: selected by the destination's element size.  */
  switch (inst->operands[0].qualifier)
    {
    case AARCH64_OPND_QLF_S_S: val = 0; break;
    case AARCH64_OPND_QLF_S_D: val = 1; break;
    case AARCH64_OPND_QLF_S_H: val = 3; break;
    default: abort ();
    }
  insert_field_2 (&field, &inst->value, val, 0);

  return;
}
/* Return the index in qualifiers_list that INST is using.  Should only
   be called once the qualifiers are known to be valid.  */

static int
aarch64_get_variant (struct aarch64_inst *inst)
{
  int i, nops, variant;

  nops = aarch64_num_of_operands (inst->opcode);
  for (variant = 0; variant < AARCH64_MAX_QLF_SEQ_NUM; ++variant)
    {
      /* A variant matches when every operand's qualifier agrees.  */
      for (i = 0; i < nops; ++i)
	if (inst->opcode->qualifiers_list[variant][i]
	    != inst->operands[i].qualifier)
	  break;
      if (i == nops)
	return variant;
    }
  /* The precondition guarantees a match; reaching here is a bug.  */
  abort ();
}
/* Do miscellaneous encodings that are not common enough to be driven by
   flags.  Mostly: FCVT-family size bits, and aliases (MOV and friends)
   that repeat one register number into two or more fields.  */

static void
do_misc_encoding (aarch64_inst *inst)
{
  unsigned int value;

  switch (inst->opcode->op)
    {
    case OP_FCVT:
      encode_fcvt (inst);
      break;
    case OP_FCVTN:
    case OP_FCVTN2:
    case OP_FCVTL:
    case OP_FCVTL2:
      encode_asimd_fcvt (inst);
      break;
    case OP_FCVTXN_S:
      encode_asisd_fcvtxn (inst);
      break;
    case OP_MOV_P_P:
    case OP_MOV_PN_PN:
    case OP_MOVS_P_P:
      /* Copy Pn to Pm and Pg.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      insert_field (FLD_SVE_Pg4_10, &inst->value, value, 0);
      break;
    case OP_MOV_Z_P_Z:
      /* Copy Zd to Zm.  */
      value = extract_field (FLD_SVE_Zd, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_V:
      /* Fill in the zero immediate.  */
      insert_fields (&inst->value, 1 << aarch64_get_variant (inst), 0,
		     2, FLD_imm5, FLD_SVE_tszh);
      break;
    case OP_MOV_Z_Z:
      /* Copy Zn to Zm.  */
      value = extract_field (FLD_SVE_Zn, inst->value, 0);
      insert_field (FLD_SVE_Zm_16, &inst->value, value, 0);
      break;
    case OP_MOV_Z_Zi:
      break;
    case OP_MOVM_P_P_P:
      /* Copy Pd to Pm.  */
      value = extract_field (FLD_SVE_Pd, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_MOVZS_P_P_P:
    case OP_MOVZ_P_P_P:
      /* Copy Pn to Pm.  */
      value = extract_field (FLD_SVE_Pn, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    case OP_NOTS_P_P_P_Z:
    case OP_NOT_P_P_P_Z:
      /* Copy Pg to Pm.  */
      value = extract_field (FLD_SVE_Pg4_10, inst->value, 0);
      insert_field (FLD_SVE_Pm, &inst->value, value, 0);
      break;
    default: break;
    }
}
1985 /* Encode the 'size' and 'Q' field for e.g. SHADD. */
1986 static void
1987 encode_sizeq (aarch64_inst *inst)
1989 aarch64_insn sizeq;
1990 enum aarch64_field_kind kind;
1991 int idx;
1993 /* Get the index of the operand whose information we are going to use
1994 to encode the size and Q fields.
1995 This is deduced from the possible valid qualifier lists. */
1996 idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1997 DEBUG_TRACE ("idx: %d; qualifier: %s", idx,
1998 aarch64_get_qualifier_name (inst->operands[idx].qualifier));
1999 sizeq = aarch64_get_qualifier_standard_value (inst->operands[idx].qualifier);
2000 /* Q */
2001 insert_field (FLD_Q, &inst->value, sizeq & 0x1, inst->opcode->mask);
2002 /* size */
2003 if (inst->opcode->iclass == asisdlse
2004 || inst->opcode->iclass == asisdlsep
2005 || inst->opcode->iclass == asisdlso
2006 || inst->opcode->iclass == asisdlsop)
2007 kind = FLD_vldst_size;
2008 else
2009 kind = FLD_size;
2010 insert_field (kind, &inst->value, (sizeq >> 1) & 0x3, inst->opcode->mask);
2013 /* Opcodes that have fields shared by multiple operands are usually flagged
2014 with flags. In this function, we detect such flags and use the
2015 information in one of the related operands to do the encoding. The 'one'
2016 operand is not any operand but one of the operands that has the enough
2017 information for such an encoding. */
2019 static void
2020 do_special_encoding (struct aarch64_inst *inst)
2022 int idx;
2023 aarch64_insn value = 0;
2025 DEBUG_TRACE ("enter with coding 0x%x", (uint32_t) inst->value);
2027 /* Condition for truly conditional executed instructions, e.g. b.cond. */
2028 if (inst->opcode->flags & F_COND)
2030 insert_field (FLD_cond2, &inst->value, inst->cond->value, 0);
2032 if (inst->opcode->flags & F_SF)
2034 idx = select_operand_for_sf_field_coding (inst->opcode);
2035 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
2036 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
2037 ? 1 : 0;
2038 insert_field (FLD_sf, &inst->value, value, 0);
2039 if (inst->opcode->flags & F_N)
2040 insert_field (FLD_N, &inst->value, value, inst->opcode->mask);
2042 if (inst->opcode->flags & F_LSE_SZ)
2044 idx = select_operand_for_sf_field_coding (inst->opcode);
2045 value = (inst->operands[idx].qualifier == AARCH64_OPND_QLF_X
2046 || inst->operands[idx].qualifier == AARCH64_OPND_QLF_SP)
2047 ? 1 : 0;
2048 insert_field (FLD_lse_sz, &inst->value, value, 0);
2050 if (inst->opcode->flags & F_RCPC3_SIZE)
2052 switch (inst->operands[0].qualifier)
2054 case AARCH64_OPND_QLF_W: value = 2; break;
2055 case AARCH64_OPND_QLF_X: value = 3; break;
2056 case AARCH64_OPND_QLF_S_B: value = 0; break;
2057 case AARCH64_OPND_QLF_S_H: value = 1; break;
2058 case AARCH64_OPND_QLF_S_S: value = 2; break;
2059 case AARCH64_OPND_QLF_S_D: value = 3; break;
2060 case AARCH64_OPND_QLF_S_Q: value = 0; break;
2061 default: return;
2063 insert_field (FLD_rcpc3_size, &inst->value, value, 0);
2066 if (inst->opcode->flags & F_SIZEQ)
2067 encode_sizeq (inst);
2068 if (inst->opcode->flags & F_FPTYPE)
2070 idx = select_operand_for_fptype_field_coding (inst->opcode);
2071 switch (inst->operands[idx].qualifier)
2073 case AARCH64_OPND_QLF_S_S: value = 0; break;
2074 case AARCH64_OPND_QLF_S_D: value = 1; break;
2075 case AARCH64_OPND_QLF_S_H: value = 3; break;
2076 default: return;
2078 insert_field (FLD_type, &inst->value, value, 0);
2080 if (inst->opcode->flags & F_SSIZE)
2082 enum aarch64_opnd_qualifier qualifier;
2083 idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2084 qualifier = inst->operands[idx].qualifier;
2085 assert (qualifier >= AARCH64_OPND_QLF_S_B
2086 && qualifier <= AARCH64_OPND_QLF_S_Q);
2087 value = aarch64_get_qualifier_standard_value (qualifier);
2088 insert_field (FLD_size, &inst->value, value, inst->opcode->mask);
2090 if (inst->opcode->flags & F_T)
2092 int num; /* num of consecutive '0's on the right side of imm5<3:0>. */
2093 aarch64_field field = {0, 0};
2094 enum aarch64_opnd_qualifier qualifier;
2096 idx = 0;
2097 qualifier = inst->operands[idx].qualifier;
2098 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2099 == AARCH64_OPND_CLASS_SIMD_REG
2100 && qualifier >= AARCH64_OPND_QLF_V_8B
2101 && qualifier <= AARCH64_OPND_QLF_V_2D);
2102 /* imm5<3:0> q <t>
2103 0000 x reserved
2104 xxx1 0 8b
2105 xxx1 1 16b
2106 xx10 0 4h
2107 xx10 1 8h
2108 x100 0 2s
2109 x100 1 4s
2110 1000 0 reserved
2111 1000 1 2d */
2112 value = aarch64_get_qualifier_standard_value (qualifier);
2113 insert_field (FLD_Q, &inst->value, value & 0x1, inst->opcode->mask);
2114 num = (int) value >> 1;
2115 assert (num >= 0 && num <= 3);
2116 gen_sub_field (FLD_imm5, 0, num + 1, &field);
2117 insert_field_2 (&field, &inst->value, 1 << num, inst->opcode->mask);
2120 if ((inst->opcode->flags & F_OPD_SIZE) && inst->opcode->iclass == sve2_urqvs)
2122 enum aarch64_opnd_qualifier qualifier[2];
2123 aarch64_insn value1 = 0;
2124 idx = 0;
2125 qualifier[0] = inst->operands[idx].qualifier;
2126 qualifier[1] = inst->operands[idx+2].qualifier;
2127 value = aarch64_get_qualifier_standard_value (qualifier[0]);
2128 value1 = aarch64_get_qualifier_standard_value (qualifier[1]);
2129 assert ((value >> 1) == value1);
2130 insert_field (FLD_size, &inst->value, value1, inst->opcode->mask);
2133 if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2135 /* Use Rt to encode in the case of e.g.
2136 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}]. */
2137 enum aarch64_opnd_qualifier qualifier;
2138 idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2139 if (idx == -1)
2140 /* Otherwise use the result operand, which has to be a integer
2141 register. */
2142 idx = 0;
2143 assert (idx == 0 || idx == 1);
2144 assert (aarch64_get_operand_class (inst->opcode->operands[idx])
2145 == AARCH64_OPND_CLASS_INT_REG);
2146 qualifier = inst->operands[idx].qualifier;
2147 insert_field (FLD_Q, &inst->value,
2148 aarch64_get_qualifier_standard_value (qualifier), 0);
2150 if (inst->opcode->flags & F_LDS_SIZE)
2152 /* e.g. LDRSB <Wt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]. */
2153 enum aarch64_opnd_qualifier qualifier;
2154 aarch64_field field = {0, 0};
2155 assert (aarch64_get_operand_class (inst->opcode->operands[0])
2156 == AARCH64_OPND_CLASS_INT_REG);
2157 gen_sub_field (FLD_opc, 0, 1, &field);
2158 qualifier = inst->operands[0].qualifier;
2159 insert_field_2 (&field, &inst->value,
2160 1 - aarch64_get_qualifier_standard_value (qualifier), 0);
2162 /* Miscellaneous encoding as the last step. */
2163 if (inst->opcode->flags & F_MISC)
2164 do_misc_encoding (inst);
2166 DEBUG_TRACE ("exit with coding 0x%x", (uint32_t) inst->value);
2169 /* Some instructions (including all SVE ones) use the instruction class
2170 to describe how a qualifiers_list index is represented in the instruction
2171 encoding. If INST is such an instruction, encode the chosen qualifier
2172 variant. */
2174 static void
2175 aarch64_encode_variant_using_iclass (struct aarch64_inst *inst)
2177 int variant = 0;
2178 switch (inst->opcode->iclass)
2180 case sme_mov:
2181 case sme_psel:
2182 /* The variant is encoded as part of the immediate. */
2183 break;
2185 case sme_size_12_bh:
2186 insert_field (FLD_S, &inst->value, aarch64_get_variant (inst), 0);
2187 break;
2189 case sme_size_12_bhs:
2190 case sme_size_12_b:
2191 insert_field (FLD_SME_size_12, &inst->value,
2192 aarch64_get_variant (inst), 0);
2193 break;
2195 case sme_size_22:
2196 insert_field (FLD_SME_size_22, &inst->value,
2197 aarch64_get_variant (inst), 0);
2198 break;
2200 case sme_size_22_hsd:
2201 insert_field (FLD_SME_size_22, &inst->value,
2202 aarch64_get_variant (inst) + 1, 0);
2203 break;
2205 case sme_size_12_hs:
2206 insert_field (FLD_SME_size_12, &inst->value,
2207 aarch64_get_variant (inst) + 1, 0);
2208 break;
2210 case sme_sz_23:
2211 insert_field (FLD_SME_sz_23, &inst->value,
2212 aarch64_get_variant (inst), 0);
2213 break;
2215 case sve_cpy:
2216 insert_fields (&inst->value, aarch64_get_variant (inst),
2217 0, 2, FLD_SVE_M_14, FLD_size);
2218 break;
2220 case sme_shift:
2221 case sve_index:
2222 case sve_index1:
2223 case sve_shift_pred:
2224 case sve_shift_unpred:
2225 case sve_shift_tsz_hsd:
2226 case sve_shift_tsz_bhsd:
2227 /* For indices and shift amounts, the variant is encoded as
2228 part of the immediate. */
2229 break;
2231 case sve_limm:
2232 case sme2_mov:
2233 /* For sve_limm, the .B, .H, and .S forms are just a convenience
2234 and depend on the immediate. They don't have a separate
2235 encoding. */
2236 break;
2238 case sme_misc:
2239 case sme2_movaz:
2240 case sve_misc:
2241 /* These instructions have only a single variant. */
2242 break;
2244 case sve_movprfx:
2245 insert_fields (&inst->value, aarch64_get_variant (inst),
2246 0, 2, FLD_SVE_M_16, FLD_size);
2247 break;
2249 case sve_pred_zm:
2250 insert_field (FLD_SVE_M_4, &inst->value, aarch64_get_variant (inst), 0);
2251 break;
2253 case sve_size_bhs:
2254 case sve_size_bhsd:
2255 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst), 0);
2256 break;
2258 case sve_size_hsd:
2259 /* MOD 3 For `OP_SVE_Vv_HSD`. */
2260 insert_field (FLD_size, &inst->value, aarch64_get_variant (inst) % 3 + 1, 0);
2261 break;
2263 case sme_fp_sd:
2264 case sme_int_sd:
2265 case sve_size_bh:
2266 case sve_size_sd:
2267 insert_field (FLD_SVE_sz, &inst->value, aarch64_get_variant (inst), 0);
2268 break;
2270 case sve_size_sd2:
2271 insert_field (FLD_SVE_sz2, &inst->value, aarch64_get_variant (inst), 0);
2272 break;
2274 case sve_size_hsd2:
2275 insert_field (FLD_SVE_size, &inst->value,
2276 aarch64_get_variant (inst) + 1, 0);
2277 break;
2279 case sve_size_tsz_bhs:
2280 insert_fields (&inst->value,
2281 (1 << aarch64_get_variant (inst)),
2282 0, 2, FLD_SVE_tszl_19, FLD_SVE_sz);
2283 break;
2285 case sve_size_13:
2286 variant = aarch64_get_variant (inst) + 1;
2287 if (variant == 2)
2288 variant = 3;
2289 insert_field (FLD_size, &inst->value, variant, 0);
2290 break;
2292 default:
2293 break;
2297 /* Converters converting an alias opcode instruction to its real form. */
2299 /* ROR <Wd>, <Ws>, #<shift>
2300 is equivalent to:
2301 EXTR <Wd>, <Ws>, <Ws>, #<shift>. */
2302 static void
2303 convert_ror_to_extr (aarch64_inst *inst)
2305 copy_operand_info (inst, 3, 2);
2306 copy_operand_info (inst, 2, 1);
2309 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2310 is equivalent to:
2311 USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0. */
2312 static void
2313 convert_xtl_to_shll (aarch64_inst *inst)
2315 inst->operands[2].qualifier = inst->operands[1].qualifier;
2316 inst->operands[2].imm.value = 0;
2319 /* Convert
2320 LSR <Xd>, <Xn>, #<shift>
2322 UBFM <Xd>, <Xn>, #<shift>, #63. */
2323 static void
2324 convert_sr_to_bfm (aarch64_inst *inst)
2326 inst->operands[3].imm.value =
2327 inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2330 /* Convert MOV to ORR. */
2331 static void
2332 convert_mov_to_orr (aarch64_inst *inst)
2334 /* MOV <Vd>.<T>, <Vn>.<T>
2335 is equivalent to:
2336 ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>. */
2337 copy_operand_info (inst, 2, 1);
2340 /* When <imms> >= <immr>, the instruction written:
2341 SBFX <Xd>, <Xn>, #<lsb>, #<width>
2342 is equivalent to:
2343 SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1). */
2345 static void
2346 convert_bfx_to_bfm (aarch64_inst *inst)
2348 int64_t lsb, width;
2350 /* Convert the operand. */
2351 lsb = inst->operands[2].imm.value;
2352 width = inst->operands[3].imm.value;
2353 inst->operands[2].imm.value = lsb;
2354 inst->operands[3].imm.value = lsb + width - 1;
2357 /* When <imms> < <immr>, the instruction written:
2358 SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2359 is equivalent to:
2360 SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1). */
2362 static void
2363 convert_bfi_to_bfm (aarch64_inst *inst)
2365 int64_t lsb, width;
2367 /* Convert the operand. */
2368 lsb = inst->operands[2].imm.value;
2369 width = inst->operands[3].imm.value;
2370 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2372 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2373 inst->operands[3].imm.value = width - 1;
2375 else
2377 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2378 inst->operands[3].imm.value = width - 1;
2382 /* The instruction written:
2383 BFC <Xd>, #<lsb>, #<width>
2384 is equivalent to:
2385 BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1). */
2387 static void
2388 convert_bfc_to_bfm (aarch64_inst *inst)
2390 int64_t lsb, width;
2392 /* Insert XZR. */
2393 copy_operand_info (inst, 3, 2);
2394 copy_operand_info (inst, 2, 1);
2395 copy_operand_info (inst, 1, 0);
2396 inst->operands[1].reg.regno = 0x1f;
2398 /* Convert the immediate operand. */
2399 lsb = inst->operands[2].imm.value;
2400 width = inst->operands[3].imm.value;
2401 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2403 inst->operands[2].imm.value = (32 - lsb) & 0x1f;
2404 inst->operands[3].imm.value = width - 1;
2406 else
2408 inst->operands[2].imm.value = (64 - lsb) & 0x3f;
2409 inst->operands[3].imm.value = width - 1;
2413 /* The instruction written:
2414 LSL <Xd>, <Xn>, #<shift>
2415 is equivalent to:
2416 UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>). */
2418 static void
2419 convert_lsl_to_ubfm (aarch64_inst *inst)
2421 int64_t shift = inst->operands[2].imm.value;
2423 if (inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31)
2425 inst->operands[2].imm.value = (32 - shift) & 0x1f;
2426 inst->operands[3].imm.value = 31 - shift;
2428 else
2430 inst->operands[2].imm.value = (64 - shift) & 0x3f;
2431 inst->operands[3].imm.value = 63 - shift;
2435 /* CINC <Wd>, <Wn>, <cond>
2436 is equivalent to:
2437 CSINC <Wd>, <Wn>, <Wn>, invert(<cond>). */
2439 static void
2440 convert_to_csel (aarch64_inst *inst)
2442 copy_operand_info (inst, 3, 2);
2443 copy_operand_info (inst, 2, 1);
2444 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2447 /* CSET <Wd>, <cond>
2448 is equivalent to:
2449 CSINC <Wd>, WZR, WZR, invert(<cond>). */
2451 static void
2452 convert_cset_to_csinc (aarch64_inst *inst)
2454 copy_operand_info (inst, 3, 1);
2455 copy_operand_info (inst, 2, 0);
2456 copy_operand_info (inst, 1, 0);
2457 inst->operands[1].reg.regno = 0x1f;
2458 inst->operands[2].reg.regno = 0x1f;
2459 inst->operands[3].cond = get_inverted_cond (inst->operands[3].cond);
2462 /* MOV <Wd>, #<imm>
2463 is equivalent to:
2464 MOVZ <Wd>, #<imm16>, LSL #<shift>. */
2466 static void
2467 convert_mov_to_movewide (aarch64_inst *inst)
2469 int is32;
2470 uint32_t shift_amount;
2471 uint64_t value = ~(uint64_t)0;
2473 switch (inst->opcode->op)
2475 case OP_MOV_IMM_WIDE:
2476 value = inst->operands[1].imm.value;
2477 break;
2478 case OP_MOV_IMM_WIDEN:
2479 value = ~inst->operands[1].imm.value;
2480 break;
2481 default:
2482 return;
2484 inst->operands[1].type = AARCH64_OPND_HALF;
2485 is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2486 if (! aarch64_wide_constant_p (value, is32, &shift_amount))
2487 /* The constraint check should have guaranteed this wouldn't happen. */
2488 return;
2489 value >>= shift_amount;
2490 value &= 0xffff;
2491 inst->operands[1].imm.value = value;
2492 inst->operands[1].shifter.kind = AARCH64_MOD_LSL;
2493 inst->operands[1].shifter.amount = shift_amount;
2496 /* MOV <Wd>, #<imm>
2497 is equivalent to:
2498 ORR <Wd>, WZR, #<imm>. */
2500 static void
2501 convert_mov_to_movebitmask (aarch64_inst *inst)
2503 copy_operand_info (inst, 2, 1);
2504 inst->operands[1].reg.regno = 0x1f;
2505 inst->operands[1].skip = 0;
2508 /* Some alias opcodes are assembled by being converted to their real-form. */
2510 static void
2511 convert_to_real (aarch64_inst *inst, const aarch64_opcode *real)
2513 const aarch64_opcode *alias = inst->opcode;
2515 if ((alias->flags & F_CONV) == 0)
2516 goto convert_to_real_return;
2518 switch (alias->op)
2520 case OP_ASR_IMM:
2521 case OP_LSR_IMM:
2522 convert_sr_to_bfm (inst);
2523 break;
2524 case OP_LSL_IMM:
2525 convert_lsl_to_ubfm (inst);
2526 break;
2527 case OP_CINC:
2528 case OP_CINV:
2529 case OP_CNEG:
2530 convert_to_csel (inst);
2531 break;
2532 case OP_CSET:
2533 case OP_CSETM:
2534 convert_cset_to_csinc (inst);
2535 break;
2536 case OP_UBFX:
2537 case OP_BFXIL:
2538 case OP_SBFX:
2539 convert_bfx_to_bfm (inst);
2540 break;
2541 case OP_SBFIZ:
2542 case OP_BFI:
2543 case OP_UBFIZ:
2544 convert_bfi_to_bfm (inst);
2545 break;
2546 case OP_BFC:
2547 convert_bfc_to_bfm (inst);
2548 break;
2549 case OP_MOV_V:
2550 convert_mov_to_orr (inst);
2551 break;
2552 case OP_MOV_IMM_WIDE:
2553 case OP_MOV_IMM_WIDEN:
2554 convert_mov_to_movewide (inst);
2555 break;
2556 case OP_MOV_IMM_LOG:
2557 convert_mov_to_movebitmask (inst);
2558 break;
2559 case OP_ROR_IMM:
2560 convert_ror_to_extr (inst);
2561 break;
2562 case OP_SXTL:
2563 case OP_SXTL2:
2564 case OP_UXTL:
2565 case OP_UXTL2:
2566 convert_xtl_to_shll (inst);
2567 break;
2568 default:
2569 break;
2572 convert_to_real_return:
2573 aarch64_replace_opcode (inst, real);
2576 /* Encode *INST_ORI of the opcode code OPCODE.
2577 Return the encoded result in *CODE and if QLF_SEQ is not NULL, return the
2578 matched operand qualifier sequence in *QLF_SEQ. */
2580 bool
2581 aarch64_opcode_encode (const aarch64_opcode *opcode,
2582 const aarch64_inst *inst_ori, aarch64_insn *code,
2583 aarch64_opnd_qualifier_t *qlf_seq,
2584 aarch64_operand_error *mismatch_detail,
2585 aarch64_instr_sequence* insn_sequence)
2587 int i;
2588 const aarch64_opcode *aliased;
2589 aarch64_inst copy, *inst;
2591 DEBUG_TRACE ("enter with %s", opcode->name);
2593 /* Create a copy of *INST_ORI, so that we can do any change we want. */
2594 copy = *inst_ori;
2595 inst = &copy;
2597 assert (inst->opcode == NULL || inst->opcode == opcode);
2598 if (inst->opcode == NULL)
2599 inst->opcode = opcode;
2601 /* Constrain the operands.
2602 After passing this, the encoding is guaranteed to succeed. */
2603 if (aarch64_match_operands_constraint (inst, mismatch_detail) == 0)
2605 DEBUG_TRACE ("FAIL since operand constraint not met");
2606 return 0;
2609 /* Get the base value.
2610 Note: this has to be before the aliasing handling below in order to
2611 get the base value from the alias opcode before we move on to the
2612 aliased opcode for encoding. */
2613 inst->value = opcode->opcode;
2615 /* No need to do anything else if the opcode does not have any operand. */
2616 if (aarch64_num_of_operands (opcode) == 0)
2617 goto encoding_exit;
2619 /* Assign operand indexes and check types. Also put the matched
2620 operand qualifiers in *QLF_SEQ to return. */
2621 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2623 assert (opcode->operands[i] == inst->operands[i].type);
2624 inst->operands[i].idx = i;
2625 if (qlf_seq != NULL)
2626 *qlf_seq = inst->operands[i].qualifier;
2629 aliased = aarch64_find_real_opcode (opcode);
2630 /* If the opcode is an alias and it does not ask for direct encoding by
2631 itself, the instruction will be transformed to the form of real opcode
2632 and the encoding will be carried out using the rules for the aliased
2633 opcode. */
2634 if (aliased != NULL && (opcode->flags & F_CONV))
2636 DEBUG_TRACE ("real opcode '%s' has been found for the alias %s",
2637 aliased->name, opcode->name);
2638 /* Convert the operands to the form of the real opcode. */
2639 convert_to_real (inst, aliased);
2640 opcode = aliased;
2643 aarch64_opnd_info *info = inst->operands;
2645 /* Call the inserter of each operand. */
2646 for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++info)
2648 const aarch64_operand *opnd;
2649 enum aarch64_opnd type = opcode->operands[i];
2650 if (type == AARCH64_OPND_NIL)
2651 break;
2652 if (info->skip)
2654 DEBUG_TRACE ("skip the incomplete operand %d", i);
2655 continue;
2657 opnd = &aarch64_operands[type];
2658 if (operand_has_inserter (opnd)
2659 && !aarch64_insert_operand (opnd, info, &inst->value, inst,
2660 mismatch_detail))
2661 return false;
2664 /* Call opcode encoders indicated by flags. */
2665 if (opcode_has_special_coder (opcode))
2666 do_special_encoding (inst);
2668 /* Possibly use the instruction class to encode the chosen qualifier
2669 variant. */
2670 aarch64_encode_variant_using_iclass (inst);
2672 /* Run a verifier if the instruction has one set. */
2673 if (opcode->verifier)
2675 enum err_type result = opcode->verifier (inst, *code, 0, true,
2676 mismatch_detail, insn_sequence);
2677 switch (result)
2679 case ERR_UND:
2680 case ERR_UNP:
2681 case ERR_NYI:
2682 return false;
2683 default:
2684 break;
2688 /* Always run constrain verifiers, this is needed because constrains need to
2689 maintain a global state. Regardless if the instruction has the flag set
2690 or not. */
2691 enum err_type result = verify_constraints (inst, *code, 0, true,
2692 mismatch_detail, insn_sequence);
2693 switch (result)
2695 case ERR_UND:
2696 case ERR_UNP:
2697 case ERR_NYI:
2698 return false;
2699 default:
2700 break;
2704 encoding_exit:
2705 DEBUG_TRACE ("exit with %s", opcode->name);
2707 *code = inst->value;
2709 return true;