/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */
#include "safe-ctype.h"
#include "opcode/arm.h"
#include "dw2gencfi.h"
#include "dwarf2dbg.h"

#define WARN_DEPRECATED 1

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
/* This structure holds the unwinding state.  */

  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */

  /* Opcodes generated from this function.  */
  unsigned char * opcodes;

  /* The number of bytes pushed to the stack.  */

  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;

  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */

  /* Nonzero if an unwind_setfp directive has been seen.  */

  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set.  This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
/* Results from operand parsing worker functions.  */

  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;

  ARM_FLOAT_ABI_SOFTFP,
/* Types of processor to assemble for.  */
#if defined __XSCALE__
#define CPU_DEFAULT	ARM_ARCH_XSCALE
#if defined __thumb__
#define CPU_DEFAULT	ARM_ARCH_V5T

# define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
# define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
  /* Legacy a.out format.  */
# define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
# define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
  /* For backwards compatibility, default to FPA.  */
# define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)	      (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);

static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];
static int meabi_flags = EABI_DEFAULT;
static int meabi_flags = EF_ARM_EABI_UNKNOWN;

  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);

/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
symbolS * GOT_symbol;
/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;

/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
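/* Illustrative example (not from the original sources): the same data
   processing instruction in the old divided syntaxes and in unified
   syntax.  Register choices here are made up for the sketch.

       @ Old ARM syntax: condition in the middle, '#' required.
           addeqs  r0, r1, #1
       @ Old Thumb syntax: no condition, flags set implicitly.
           add     r0, r1, #1
       @ Unified syntax (after ".syntax unified"): one spelling for both
       @ states; in Thumb-2 the condition is supplied by an IT block and
       @ the conditional affix comes at the end.
           it      eq
           addseq  r0, r1, #1  */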
  enum neon_el_type type;

#define NEON_MAX_TYPE_ELS 4

  struct neon_type_el el[NEON_MAX_TYPE_ELS];

  unsigned long instruction;

  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */

  struct neon_type vectype;

  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */

    bfd_reloc_code_real_type type;

    struct neon_type_el vectype;

    unsigned present    : 1;  /* Operand present.  */
    unsigned isreg      : 1;  /* Operand was a register.  */
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

static struct arm_it inst;
#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.	*/
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

  const char * template;

#define COND_ALWAYS 0xE

  const char *template;

struct asm_barrier_opt

  const char *template;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
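/* Illustrative example (not from the original sources): the MSR field
   specifiers map onto these bits.  A hypothetical line such as

       msr   CPSR_fc, r0

   selects the flags and control fields, i.e. a mask of PSR_f | PSR_c,
   while "msr SPSR_fsxc, r1" selects all four field bits and additionally
   sets SPSR_BIT to address the banked SPSR.  */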
  bfd_reloc_code_real_type reloc;

  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn

  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias

  unsigned char defined;

  struct neon_type_el eltype;
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.	*/

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */

  unsigned char number;

  unsigned char builtin;
  struct neon_typed_alias *neon;

/* Diagnostics used when we don't get a register of the expected type.	 */
const char *const reg_expected_msgs[] =

  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2:  */

  /* Basic string to match.  */
  const char *template;

  /* Parameters to instruction.	 */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set *avariant;
  const arm_feature_set *tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */

#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000

#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool

  expressionS	 literals[MAX_LITERAL_POOL_SIZE];
  unsigned int	 next_free_entry;

  struct literal_pool * next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State variables for IT block handling.  */
static bfd_boolean current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	 */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C)	((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

walk_no_bignums (symbolS * sp)

  if (symbol_get_value_expression (sp)->X_op == O_big)

  if (symbol_get_value_expression (sp)->X_add_symbol)

    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	    || (symbol_get_value_expression (sp)->X_op_symbol
		&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
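/* Illustrative examples (not from the original sources) of how these
   prefix modes are meant to be used by the operand parsers:

     GE_IMM_PREFIX      expects "#4" or "$4" (a bare "4" is rejected
                        outside unified syntax)
     GE_NO_PREFIX       expects a bare "4" (e.g. inside an address form)
     GE_OPT_PREFIX      accepts either "4" or "#4"
     GE_OPT_PREFIX_BIG  likewise, but additionally lets a 64-bit constant
                        through for Neon VMOV/VMVN immediate forms.  */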
784 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
789 /* In unified syntax, all prefixes are optional. */
791 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
796 case GE_NO_PREFIX
: break;
798 if (!is_immediate_prefix (**str
))
800 inst
.error
= _("immediate expression requires a # prefix");
806 case GE_OPT_PREFIX_BIG
:
807 if (is_immediate_prefix (**str
))
813 memset (ep
, 0, sizeof (expressionS
));
815 save_in
= input_line_pointer
;
816 input_line_pointer
= *str
;
817 in_my_get_expression
= 1;
818 seg
= expression (ep
);
819 in_my_get_expression
= 0;
821 if (ep
->X_op
== O_illegal
)
823 /* We found a bad expression in md_operand(). */
824 *str
= input_line_pointer
;
825 input_line_pointer
= save_in
;
826 if (inst
.error
== NULL
)
827 inst
.error
= _("bad expression");
832 if (seg
!= absolute_section
833 && seg
!= text_section
834 && seg
!= data_section
835 && seg
!= bss_section
836 && seg
!= undefined_section
)
838 inst
.error
= _("bad segment");
839 *str
= input_line_pointer
;
840 input_line_pointer
= save_in
;
845 /* Get rid of any bignums now, so that we don't generate an error for which
846 we can't establish a line number later on. Big numbers are never valid
847 in instructions, which is where this routine is always called. */
848 if (prefix_mode
!= GE_OPT_PREFIX_BIG
849 && (ep
->X_op
== O_big
851 && (walk_no_bignums (ep
->X_add_symbol
)
853 && walk_no_bignums (ep
->X_op_symbol
))))))
855 inst
.error
= _("invalid constant");
856 *str
= input_line_pointer
;
857 input_line_pointer
= save_in
;
861 *str
= input_line_pointer
;
862 input_line_pointer
= save_in
;
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.	However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
881 md_atof (int type
, char * litP
, int * sizeP
)
884 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
916 return _("bad call to MD_ATOF()");
919 t
= atof_ieee (input_line_pointer
, type
, words
);
921 input_line_pointer
= t
;
924 if (target_big_endian
)
926 for (i
= 0; i
< prec
; i
++)
928 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
934 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
935 for (i
= prec
- 1; i
>= 0; i
--)
937 md_number_to_chars (litP
, (valueT
) words
[i
], 2);
941 /* For a 4 byte float the order of elements in `words' is 1 0.
942 For an 8 byte float the order is 1 0 3 2. */
943 for (i
= 0; i
< prec
; i
+= 2)
945 md_number_to_chars (litP
, (valueT
) words
[i
+ 1], 2);
946 md_number_to_chars (litP
+ 2, (valueT
) words
[i
], 2);
954 /* We handle all bad expressions here, so that we can report the faulty
955 instruction in the error message. */
957 md_operand (expressionS
* expr
)
959 if (in_my_get_expression
)
960 expr
->X_op
= O_illegal
;
963 /* Immediate values. */
965 /* Generic immediate-value read function for use in directives.
966 Accepts anything that 'expression' can fold to a constant.
967 *val receives the number. */
970 immediate_for_directive (int *val
)
973 exp
.X_op
= O_illegal
;
975 if (is_immediate_prefix (*input_line_pointer
))
977 input_line_pointer
++;
981 if (exp
.X_op
!= O_constant
)
983 as_bad (_("expected #constant"));
984 ignore_rest_of_line ();
987 *val
= exp
.X_add_number
;
992 /* Register parsing. */
994 /* Generic register parser. CCP points to what should be the
995 beginning of a register name. If it is indeed a valid register
996 name, advance CCP over it and return the reg_entry structure;
997 otherwise return NULL. Does not issue diagnostics. */
999 static struct reg_entry
*
1000 arm_reg_parse_multi (char **ccp
)
1004 struct reg_entry
*reg
;
1006 #ifdef REGISTER_PREFIX
1007 if (*start
!= REGISTER_PREFIX
)
1011 #ifdef OPTIONAL_REGISTER_PREFIX
1012 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1017 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1022 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1024 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1034 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1035 enum arm_reg_type type
)
1037 /* Alternative syntaxes are accepted for a few register classes. */
1044 /* Generic coprocessor register names are allowed for these. */
1045 if (reg
&& reg
->type
== REG_TYPE_CN
)
1050 /* For backward compatibility, a bare number is valid here. */
1052 unsigned long processor
= strtoul (start
, ccp
, 10);
1053 if (*ccp
!= start
&& processor
<= 15)
1057 case REG_TYPE_MMXWC
:
1058 /* WC includes WCG. ??? I'm not sure this is true for all
1059 instructions that take WC registers. */
1060 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1071 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1072 return value is the register number or FAIL. */
1075 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1078 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1081 /* Do not allow a scalar (reg+index) to parse as a register. */
1082 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1085 if (reg
&& reg
->type
== type
)
1088 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
/* Parse a Neon type specifier. *STR should point at the leading '.'
   character. Does no verification at this stage that the type fits the opcode

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
   type, FAIL if not.  */
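/* Illustrative examples (not from the original sources) of specifiers this
   parser is expected to accept; the instructions are only for context:

       vadd.i32     q0, q1, q2    @ one element type, ".i32"
       vmull.s8     q0, d1, d2    @ signed 8-bit elements, ".s8"
       vcvt.f32.s32 q0, q1        @ two types in one specifier
       vadd.f       d0, d1, d2    @ ".f" is shorthand for ".f32"

   Each ".<type><size>" pair becomes one neon_type_el in TYPE->el[].  */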
1110 parse_neon_type (struct neon_type
*type
, char **str
)
1117 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1119 enum neon_el_type thistype
= NT_untyped
;
1120 unsigned thissize
= -1u;
1127 /* Just a size without an explicit type. */
1131 switch (TOLOWER (*ptr
))
1133 case 'i': thistype
= NT_integer
; break;
1134 case 'f': thistype
= NT_float
; break;
1135 case 'p': thistype
= NT_poly
; break;
1136 case 's': thistype
= NT_signed
; break;
1137 case 'u': thistype
= NT_unsigned
; break;
1139 thistype
= NT_float
;
1144 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1150 /* .f is an abbreviation for .f32. */
1151 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1156 thissize
= strtoul (ptr
, &ptr
, 10);
1158 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1161 as_bad (_("bad size %d in type specifier"), thissize
);
1169 type
->el
[type
->elems
].type
= thistype
;
1170 type
->el
[type
->elems
].size
= thissize
;
1175 /* Empty/missing type is not a successful parse. */
1176 if (type
->elems
== 0)
1184 /* Errors may be set multiple times during parsing or bit encoding
1185 (particularly in the Neon bits), but usually the earliest error which is set
1186 will be the most meaningful. Avoid overwriting it with later (cascading)
1187 errors by calling this function. */
1190 first_error (const char *err
)
1196 /* Parse a single type, e.g. ".s32", leading period included. */
1198 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1201 struct neon_type optype
;
1205 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1207 if (optype
.elems
== 1)
1208 *vectype
= optype
.el
[0];
1211 first_error (_("only one type should be specified for operand"));
1217 first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */
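/* Illustrative examples (not from the original sources) of operands this
   routine is meant to handle:

       d7           @ plain register, no index
       d7[2]        @ scalar: register 7, lane 2
       d7[]         @ "all lanes", recorded as index NEON_ALL_LANES
       d7.s16[1]    @ typed scalar; the type lands in TYPEINFO->eltype

   When an index is present, NTA_HASINDEX is set in TYPEINFO->defined.  */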
1241 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1242 enum arm_reg_type
*rtype
,
1243 struct neon_typed_alias
*typeinfo
)
1246 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1247 struct neon_typed_alias atype
;
1248 struct neon_type_el parsetype
;
1252 atype
.eltype
.type
= NT_invtype
;
1253 atype
.eltype
.size
= -1;
1255 /* Try alternate syntax for some types of register. Note these are mutually
1256 exclusive with the Neon syntax extensions. */
1259 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1267 /* Undo polymorphism when a set of register types may be accepted. */
1268 if ((type
== REG_TYPE_NDQ
1269 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1270 || (type
== REG_TYPE_VFSD
1271 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1272 || (type
== REG_TYPE_NSDQ
1273 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1274 || reg
->type
== REG_TYPE_NQ
))
1275 || (type
== REG_TYPE_MMXWC
1276 && (reg
->type
== REG_TYPE_MMXWCG
)))
1279 if (type
!= reg
->type
)
1285 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1287 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1289 first_error (_("can't redefine type for operand"));
1292 atype
.defined
|= NTA_HASTYPE
;
1293 atype
.eltype
= parsetype
;
1296 if (skip_past_char (&str
, '[') == SUCCESS
)
1298 if (type
!= REG_TYPE_VFD
)
1300 first_error (_("only D registers may be indexed"));
1304 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1306 first_error (_("can't change index for operand"));
1310 atype
.defined
|= NTA_HASINDEX
;
1312 if (skip_past_char (&str
, ']') == SUCCESS
)
1313 atype
.index
= NEON_ALL_LANES
;
1318 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1320 if (exp
.X_op
!= O_constant
)
1322 first_error (_("constant expression required"));
1326 if (skip_past_char (&str
, ']') == FAIL
)
1329 atype
.index
= exp
.X_add_number
;
/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.
1353 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1354 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1356 struct neon_typed_alias atype
;
1358 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1363 /* Do not allow a scalar (reg+index) to parse as a register. */
1364 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1366 first_error (_("register operand expected, but got scalar"));
1371 *vectype
= atype
.eltype
;
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
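/* Illustrative example (not from the original sources): parse_scalar
   encodes "d5[3]" as 5 * 16 + 3 = 0x53, so that afterwards
   NEON_SCALAR_REG (0x53) == 5 and NEON_SCALAR_INDEX (0x53) == 3.  */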
/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking. So, we
   just do easy checks here, and do further checks later.  */
1386 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1390 struct neon_typed_alias atype
;
1392 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1394 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1397 if (atype
.index
== NEON_ALL_LANES
)
1399 first_error (_("scalar must have an index"));
1402 else if (atype
.index
>= 64 / elsize
)
1404 first_error (_("scalar index out of range"));
1409 *type
= atype
.eltype
;
1413 return reg
* 16 + atype
.index
;
1416 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1418 parse_reg_list (char ** strp
)
1420 char * str
= * strp
;
1424 /* We come back here if we get ranges concatenated by '+' or '|'. */
1439 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1441 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1451 first_error (_("bad range in register list"));
1455 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1457 if (range
& (1 << i
))
1459 (_("Warning: duplicated register (r%d) in register list"),
1467 if (range
& (1 << reg
))
1468 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1470 else if (reg
<= cur_reg
)
1471 as_tsktsk (_("Warning: register range not in ascending order"));
1476 while (skip_past_comma (&str
) != FAIL
1477 || (in_range
= 1, *str
++ == '-'));
1482 first_error (_("missing `}'"));
1490 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1493 if (expr
.X_op
== O_constant
)
1495 if (expr
.X_add_number
1496 != (expr
.X_add_number
& 0x0000ffff))
1498 inst
.error
= _("invalid register mask");
1502 if ((range
& expr
.X_add_number
) != 0)
1504 int regno
= range
& expr
.X_add_number
;
1507 regno
= (1 << regno
) - 1;
1509 (_("Warning: duplicated register (r%d) in register list"),
1513 range
|= expr
.X_add_number
;
1517 if (inst
.reloc
.type
!= 0)
1519 inst
.error
= _("expression too complex");
1523 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1524 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1525 inst
.reloc
.pc_rel
= 0;
1529 if (*str
== '|' || *str
== '+')
1535 while (another_range
);
1541 /* Types of registers in a list. */
1550 /* Parse a VFP register list. If the string is invalid return FAIL.
1551 Otherwise return the number of registers, and set PBASE to the first
1552 register. Parses registers of type ETYPE.
1553 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1554 - Q registers can be used to specify pairs of D registers
1555 - { } can be omitted from around a singleton register list
1556 FIXME: This is not implemented, as it would require backtracking in
1559 This could be done (the meaning isn't really ambiguous), but doesn't
1560 fit in well with the current parsing framework.
1561 - 32 D registers may be used (also true for VFPv3).
1562 FIXME: Types are ignored in these register lists, which is probably a
1566 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1571 enum arm_reg_type regtype
= 0;
1575 unsigned long mask
= 0;
1580 inst
.error
= _("expecting {");
1589 regtype
= REG_TYPE_VFS
;
1594 regtype
= REG_TYPE_VFD
;
1597 case REGLIST_NEON_D
:
1598 regtype
= REG_TYPE_NDQ
;
1602 if (etype
!= REGLIST_VFP_S
)
1604 /* VFPv3 allows 32 D registers. */
1605 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
1609 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1612 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1619 base_reg
= max_regs
;
1623 int setmask
= 1, addregs
= 1;
1625 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1627 if (new_base
== FAIL
)
1629 first_error (_(reg_expected_msgs
[regtype
]));
1633 if (new_base
>= max_regs
)
1635 first_error (_("register out of range in list"));
1639 /* Note: a value of 2 * n is returned for the register Q<n>. */
1640 if (regtype
== REG_TYPE_NQ
)
1646 if (new_base
< base_reg
)
1647 base_reg
= new_base
;
1649 if (mask
& (setmask
<< new_base
))
1651 first_error (_("invalid register list"));
1655 if ((mask
>> new_base
) != 0 && ! warned
)
1657 as_tsktsk (_("register list not in ascending order"));
1661 mask
|= setmask
<< new_base
;
1664 if (*str
== '-') /* We have the start of a range expression */
1670 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1673 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1677 if (high_range
>= max_regs
)
1679 first_error (_("register out of range in list"));
1683 if (regtype
== REG_TYPE_NQ
)
1684 high_range
= high_range
+ 1;
1686 if (high_range
<= new_base
)
1688 inst
.error
= _("register range not in ascending order");
1692 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1694 if (mask
& (setmask
<< new_base
))
1696 inst
.error
= _("invalid register list");
1700 mask
|= setmask
<< new_base
;
1705 while (skip_past_comma (&str
) != FAIL
);
1709 /* Sanity check -- should have raised a parse error above. */
1710 if (count
== 0 || count
> max_regs
)
1715 /* Final test -- the registers must be consecutive. */
1717 for (i
= 0; i
< count
; i
++)
1719 if ((mask
& (1u << i
)) == 0)
1721 inst
.error
= _("non-contiguous register range");
1731 /* True if two alias types are the same. */
1734 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1742 if (a
->defined
!= b
->defined
)
1745 if ((a
->defined
& NTA_HASTYPE
) != 0
1746 && (a
->eltype
.type
!= b
->eltype
.type
1747 || a
->eltype
.size
!= b
->eltype
.size
))
1750 if ((a
->defined
& NTA_HASINDEX
) != 0
1751 && (a
->index
!= b
->index
))
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
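/* Illustrative example (not from the original sources): for the operand
   "{d0[1], d2[1], d4[1]}" the parser sees lane 1, register stride 2 and
   list length 3, and so returns

       1 | ((2 - 1) << 4) | ((3 - 1) << 5)  ==  0x51

   from which NEON_LANE, NEON_REG_STRIDE and NEON_REGLIST_LENGTH recover
   1, 2 and 3 respectively.  */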
1770 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1771 struct neon_type_el
*eltype
)
1778 int leading_brace
= 0;
1779 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1781 const char *const incr_error
= "register stride must be 1 or 2";
1782 const char *const type_error
= "mismatched element/structure types in list";
1783 struct neon_typed_alias firsttype
;
1785 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1790 struct neon_typed_alias atype
;
1791 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1795 first_error (_(reg_expected_msgs
[rtype
]));
1802 if (rtype
== REG_TYPE_NQ
)
1809 else if (reg_incr
== -1)
1811 reg_incr
= getreg
- base_reg
;
1812 if (reg_incr
< 1 || reg_incr
> 2)
1814 first_error (_(incr_error
));
1818 else if (getreg
!= base_reg
+ reg_incr
* count
)
1820 first_error (_(incr_error
));
1824 if (!neon_alias_types_same (&atype
, &firsttype
))
1826 first_error (_(type_error
));
1830 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1834 struct neon_typed_alias htype
;
1835 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1837 lane
= NEON_INTERLEAVE_LANES
;
1838 else if (lane
!= NEON_INTERLEAVE_LANES
)
1840 first_error (_(type_error
));
1845 else if (reg_incr
!= 1)
1847 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1851 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1854 first_error (_(reg_expected_msgs
[rtype
]));
1857 if (!neon_alias_types_same (&htype
, &firsttype
))
1859 first_error (_(type_error
));
1862 count
+= hireg
+ dregs
- getreg
;
1866 /* If we're using Q registers, we can't use [] or [n] syntax. */
1867 if (rtype
== REG_TYPE_NQ
)
1873 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1877 else if (lane
!= atype
.index
)
1879 first_error (_(type_error
));
1883 else if (lane
== -1)
1884 lane
= NEON_INTERLEAVE_LANES
;
1885 else if (lane
!= NEON_INTERLEAVE_LANES
)
1887 first_error (_(type_error
));
1892 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1894 /* No lane set by [x]. We must be interleaving structures. */
1896 lane
= NEON_INTERLEAVE_LANES
;
1899 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1900 || (count
> 1 && reg_incr
== -1))
1902 first_error (_("error parsing element/structure list"));
1906 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1908 first_error (_("expected }"));
1916 *eltype
= firsttype
.eltype
;
1921 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1924 /* Parse an explicit relocation suffix on an expression. This is
1925 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1926 arm_reloc_hsh contains no entries, so this function can only
1927 succeed if there is no () after the word. Returns -1 on error,
1928 BFD_RELOC_UNUSED if there wasn't any suffix. */
1930 parse_reloc (char **str
)
1932 struct reloc_entry
*r
;
1936 return BFD_RELOC_UNUSED
;
1941 while (*q
&& *q
!= ')' && *q
!= ',')
1946 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1953 /* Directives: register aliases. */
1955 static struct reg_entry
*
1956 insert_reg_alias (char *str
, int number
, int type
)
1958 struct reg_entry
*new;
1961 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1964 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1966 /* Only warn about a redefinition if it's not defined as the
1968 else if (new->number
!= number
|| new->type
!= type
)
1969 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1974 name
= xstrdup (str
);
1975 new = xmalloc (sizeof (struct reg_entry
));
1978 new->number
= number
;
1980 new->builtin
= FALSE
;
1983 if (hash_insert (arm_reg_hsh
, name
, (PTR
) new))
1990 insert_neon_reg_alias (char *str
, int number
, int type
,
1991 struct neon_typed_alias
*atype
)
1993 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
1997 first_error (_("attempt to redefine typed alias"));
2003 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
2004 *reg
->neon
= *atype
;
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return non-zero.  Otherwise return zero.  */
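/* Illustrative example (not from the original sources):

       acc  .req  r4      @ "acc" becomes an alias for r4
       ACC  .req  r5      @ warned about and ignored: the first .req also
                          @ registered the all-uppercase variant "ACC"

   create_register_alias below inserts the alias under the name as given
   plus all-lowercase and all-uppercase variants, unless those spellings
   match the original.  */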
2016 create_register_alias (char * newname
, char *p
)
2018 struct reg_entry
*old
;
2019 char *oldname
, *nbuf
;
2022 /* The input scrubber ensures that whitespace after the mnemonic is
2023 collapsed to single spaces. */
2025 if (strncmp (oldname
, " .req ", 6) != 0)
2029 if (*oldname
== '\0')
2032 old
= hash_find (arm_reg_hsh
, oldname
);
2035 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2039 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2040 the desired alias name, and p points to its end. If not, then
2041 the desired alias name is in the global original_case_string. */
2042 #ifdef TC_CASE_SENSITIVE
2045 newname
= original_case_string
;
2046 nlen
= strlen (newname
);
2049 nbuf
= alloca (nlen
+ 1);
2050 memcpy (nbuf
, newname
, nlen
);
2053 /* Create aliases under the new name as stated; an all-lowercase
2054 version of the new name; and an all-uppercase version of the new
2056 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2058 for (p
= nbuf
; *p
; p
++)
2061 if (strncmp (nbuf
, newname
, nlen
))
2062 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2064 for (p
= nbuf
; *p
; p
++)
2067 if (strncmp (nbuf
, newname
, nlen
))
2068 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2073 /* Create a Neon typed/indexed register alias using directives, e.g.:
2078 These typed registers can be used instead of the types specified after the
2079 Neon mnemonic, so long as all operands given have types. Types can also be
2080 specified directly, e.g.:
2081 vadd d0.s32, d1.s32, d2.s32
2085 create_neon_reg_alias (char *newname
, char *p
)
2087 enum arm_reg_type basetype
;
2088 struct reg_entry
*basereg
;
2089 struct reg_entry mybasereg
;
2090 struct neon_type ntype
;
2091 struct neon_typed_alias typeinfo
;
2092 char *namebuf
, *nameend
;
2095 typeinfo
.defined
= 0;
2096 typeinfo
.eltype
.type
= NT_invtype
;
2097 typeinfo
.eltype
.size
= -1;
2098 typeinfo
.index
= -1;
2102 if (strncmp (p
, " .dn ", 5) == 0)
2103 basetype
= REG_TYPE_VFD
;
2104 else if (strncmp (p
, " .qn ", 5) == 0)
2105 basetype
= REG_TYPE_NQ
;
2114 basereg
= arm_reg_parse_multi (&p
);
2116 if (basereg
&& basereg
->type
!= basetype
)
2118 as_bad (_("bad type for register"));
2122 if (basereg
== NULL
)
2125 /* Try parsing as an integer. */
2126 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2127 if (exp
.X_op
!= O_constant
)
2129 as_bad (_("expression must be constant"));
2132 basereg
= &mybasereg
;
2133 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2139 typeinfo
= *basereg
->neon
;
2141 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2143 /* We got a type. */
2144 if (typeinfo
.defined
& NTA_HASTYPE
)
2146 as_bad (_("can't redefine the type of a register alias"));
2150 typeinfo
.defined
|= NTA_HASTYPE
;
2151 if (ntype
.elems
!= 1)
2153 as_bad (_("you must specify a single type only"));
2156 typeinfo
.eltype
= ntype
.el
[0];
2159 if (skip_past_char (&p
, '[') == SUCCESS
)
2162 /* We got a scalar index. */
2164 if (typeinfo
.defined
& NTA_HASINDEX
)
2166 as_bad (_("can't redefine the index of a scalar alias"));
2170 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2172 if (exp
.X_op
!= O_constant
)
2174 as_bad (_("scalar index must be constant"));
2178 typeinfo
.defined
|= NTA_HASINDEX
;
2179 typeinfo
.index
= exp
.X_add_number
;
2181 if (skip_past_char (&p
, ']') == FAIL
)
2183 as_bad (_("expecting ]"));
2188 namelen
= nameend
- newname
;
2189 namebuf
= alloca (namelen
+ 1);
2190 strncpy (namebuf
, newname
, namelen
);
2191 namebuf
[namelen
] = '\0';
2193 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2194 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2196 /* Insert name in all uppercase. */
2197 for (p
= namebuf
; *p
; p
++)
2200 if (strncmp (namebuf
, newname
, namelen
))
2201 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2202 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2204 /* Insert name in all lowercase. */
2205 for (p
= namebuf
; *p
; p
++)
2208 if (strncmp (namebuf
, newname
, namelen
))
2209 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2210 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

s_req (int a ATTRIBUTE_UNUSED)

  as_bad (_("invalid syntax for .req directive"));

s_dn (int a ATTRIBUTE_UNUSED)

  as_bad (_("invalid syntax for .dn directive"));

s_qn (int a ATTRIBUTE_UNUSED)

  as_bad (_("invalid syntax for .qn directive"));

/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:
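
       foo .req r0
       .unreq foo

   (Illustrative example; the alias and register names here are made up.)  */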
2242 s_unreq (int a ATTRIBUTE_UNUSED
)
2247 name
= input_line_pointer
;
2249 while (*input_line_pointer
!= 0
2250 && *input_line_pointer
!= ' '
2251 && *input_line_pointer
!= '\n')
2252 ++input_line_pointer
;
2254 saved_char
= *input_line_pointer
;
2255 *input_line_pointer
= 0;
2258 as_bad (_("invalid syntax for .unreq directive"));
2261 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2264 as_bad (_("unknown register alias '%s'"), name
);
2265 else if (reg
->builtin
)
2266 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2270 hash_delete (arm_reg_hsh
, name
);
2271 free ((char *) reg
->name
);
2278 *input_line_pointer
= saved_char
;
2279 demand_empty_rest_of_line ();
2282 /* Directives: Instruction set selection. */
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
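/* Illustrative example (not from the original sources) of the symbols this
   machinery emits for a section that mixes states and data:

       .arm
   $a:     add   r0, r0, r1        @ start of a run of ARM code
       .thumb
   $t:     adds  r0, r0, r1        @ start of a run of Thumb code
   $d:     .word 0x12345678        @ start of a run of literal data

   The $a/$t/$d labels are not written by the user; mapping_state below
   creates them automatically at each state change.  */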
static enum mstate mapstate = MAP_UNDEFINED;
2293 mapping_state (enum mstate state
)
2296 const char * symname
;
2299 if (mapstate
== state
)
2300 /* The mapping symbol has already been emitted.
2301 There is nothing else to do. */
2310 type
= BSF_NO_FLAGS
;
2314 type
= BSF_NO_FLAGS
;
2318 type
= BSF_NO_FLAGS
;
2326 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2328 symbolP
= symbol_new (symname
, now_seg
, (valueT
) frag_now_fix (), frag_now
);
2329 symbol_table_insert (symbolP
);
2330 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2335 THUMB_SET_FUNC (symbolP
, 0);
2336 ARM_SET_THUMB (symbolP
, 0);
2337 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2341 THUMB_SET_FUNC (symbolP
, 1);
2342 ARM_SET_THUMB (symbolP
, 1);
2343 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2352 #define mapping_state(x) /* nothing */
2355 /* Find the real, Thumb encoded start of a Thumb function. */
2358 find_real_start (symbolS
* symbolP
)
2361 const char * name
= S_GET_NAME (symbolP
);
2362 symbolS
* new_target
;
2364 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2365 #define STUB_NAME ".real_start_of"
2370 /* The compiler may generate BL instructions to local labels because
2371 it needs to perform a branch to a far away location. These labels
2372 do not have a corresponding ".real_start_of" label. We check
2373 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2374 the ".real_start_of" convention for nonlocal branches. */
2375 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2378 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2379 new_target
= symbol_find (real_start
);
2381 if (new_target
== NULL
)
2383 as_warn ("Failed to find real start of function: %s\n", name
);
2384 new_target
= symbolP
;
2391 opcode_select (int width
)
2398 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2399 as_bad (_("selected processor does not support THUMB opcodes"));
2402 /* No need to force the alignment, since we will have been
2403 coming from ARM mode, which is word-aligned. */
2404 record_alignment (now_seg
, 1);
2406 mapping_state (MAP_THUMB
);
2412 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2413 as_bad (_("selected processor does not support ARM opcodes"));
2418 frag_align (2, 0, 0);
2420 record_alignment (now_seg
, 1);
2422 mapping_state (MAP_ARM
);
2426 as_bad (_("invalid instruction size selected (%d)"), width
);
2431 s_arm (int ignore ATTRIBUTE_UNUSED
)
2434 demand_empty_rest_of_line ();
2438 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2441 demand_empty_rest_of_line ();
2445 s_code (int unused ATTRIBUTE_UNUSED
)
2449 temp
= get_absolute_expression ();
2454 opcode_select (temp
);
2458 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2463 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2465 /* If we are not already in thumb mode go into it, EVEN if
2466 the target processor does not support thumb instructions.
2467 This is used by gcc/config/arm/lib1funcs.asm for example
2468 to compile interworking support functions even if the
2469 target processor should not support interworking. */
2473 record_alignment (now_seg
, 1);
2476 demand_empty_rest_of_line ();
2480 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2484 /* The following label is the name/address of the start of a Thumb function.
2485 We need to know this for the interworking support. */
2486 label_is_thumb_function_name
= TRUE
;
2489 /* Perform a .set directive, but also mark the alias as
2490 being a thumb function. */
2493 s_thumb_set (int equiv
)
2495 /* XXX the following is a duplicate of the code for s_set() in read.c
2496 We cannot just call that code as we need to get at the symbol that
2503 /* Especial apologies for the random logic:
2504 This just grew, and could be parsed much more simply!
2506 name
= input_line_pointer
;
2507 delim
= get_symbol_end ();
2508 end_name
= input_line_pointer
;
2511 if (*input_line_pointer
!= ',')
2514 as_bad (_("expected comma after name \"%s\""), name
);
2516 ignore_rest_of_line ();
2520 input_line_pointer
++;
2523 if (name
[0] == '.' && name
[1] == '\0')
2525 /* XXX - this should not happen to .thumb_set. */
2529 if ((symbolP
= symbol_find (name
)) == NULL
2530 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2533 /* When doing symbol listings, play games with dummy fragments living
2534 outside the normal fragment chain to record the file and line info
2536 if (listing
& LISTING_SYMBOLS
)
2538 extern struct list_info_struct
* listing_tail
;
2539 fragS
* dummy_frag
= xmalloc (sizeof (fragS
));
2541 memset (dummy_frag
, 0, sizeof (fragS
));
2542 dummy_frag
->fr_type
= rs_fill
;
2543 dummy_frag
->line
= listing_tail
;
2544 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2545 dummy_frag
->fr_symbol
= symbolP
;
2549 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2552 /* "set" symbols are local unless otherwise specified. */
2553 SF_SET_LOCAL (symbolP
);
2554 #endif /* OBJ_COFF */
2555 } /* Make a new symbol. */
2557 symbol_table_insert (symbolP
);
2562 && S_IS_DEFINED (symbolP
)
2563 && S_GET_SEGMENT (symbolP
) != reg_section
)
2564 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2566 pseudo_set (symbolP
);
2568 demand_empty_rest_of_line ();
2570 /* XXX Now we come to the Thumb specific bit of code. */
2572 THUMB_SET_FUNC (symbolP
, 1);
2573 ARM_SET_THUMB (symbolP
, 1);
2574 #if defined OBJ_ELF || defined OBJ_COFF
2575 ARM_SET_INTERWORK (symbolP
, support_interwork
);
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */
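/* Illustrative example (not from the original sources):

       .syntax unified       @ sets unified_syntax = TRUE
       .syntax divided       @ back to the old per-mode syntax

   Anything else, e.g. ".syntax foo", is reported by s_syntax below as an
   unrecognized syntax mode.  */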
2585 s_syntax (int unused ATTRIBUTE_UNUSED
)
2589 name
= input_line_pointer
;
2590 delim
= get_symbol_end ();
2592 if (!strcasecmp (name
, "unified"))
2593 unified_syntax
= TRUE
;
2594 else if (!strcasecmp (name
, "divided"))
2595 unified_syntax
= FALSE
;
2598 as_bad (_("unrecognized syntax mode \"%s\""), name
);
2601 *input_line_pointer
= delim
;
2602 demand_empty_rest_of_line ();
2605 /* Directives: sectioning and alignment. */
2607 /* Same as s_align_ptwo but align 0 => align 2. */
2610 s_align (int unused ATTRIBUTE_UNUSED
)
2614 long max_alignment
= 15;
2616 temp
= get_absolute_expression ();
2617 if (temp
> max_alignment
)
2618 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2621 as_bad (_("alignment negative. 0 assumed."));
2625 if (*input_line_pointer
== ',')
2627 input_line_pointer
++;
2628 temp_fill
= get_absolute_expression ();
2636 /* Only make a frag if we HAVE to. */
2637 if (temp
&& !need_pass_2
)
2638 frag_align (temp
, (int) temp_fill
, 0);
2639 demand_empty_rest_of_line ();
2641 record_alignment (now_seg
, temp
);
2645 s_bss (int ignore ATTRIBUTE_UNUSED
)
2647 /* We don't support putting frags in the BSS segment, we fake it by
2648 marking in_bss, then looking at s_skip for clues. */
2649 subseg_set (bss_section
, 0);
2650 demand_empty_rest_of_line ();
2651 mapping_state (MAP_DATA
);
2655 s_even (int ignore ATTRIBUTE_UNUSED
)
2657 /* Never make frag if expect extra pass. */
2659 frag_align (1, 0, 0);
2661 record_alignment (now_seg
, 1);
2663 demand_empty_rest_of_line ();
/* Directives: Literal pools.  */
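/* Illustrative example (not from the original sources) of what these pools
   serve.  A pseudo-load of a constant that does not fit in an immediate:

       ldr   r0, =0x12345678
       ...
       .ltorg                 @ flush the current section's pool here

   The constant is stored in the nearest literal pool and the ldr is
   rewritten as a PC-relative load from that pool entry.  */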
2668 static literal_pool
*
2669 find_literal_pool (void)
2671 literal_pool
* pool
;
2673 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
2675 if (pool
->section
== now_seg
2676 && pool
->sub_section
== now_subseg
)
static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));
      if (! pool)
        return NULL;

      pool->next_free_entry = 0;
      pool->section         = now_seg;
      pool->sub_section     = now_subseg;
      pool->next            = list_of_pools;
      pool->symbol          = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
                                    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  /* Done.  */
  return pool;
}
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */
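/* For example (illustrative only, not from the original source), assembling

       ldr   r0, =0x12345678
       .ltorg

   routes the constant through this function into the current section's
   literal pool, and the .ltorg (or .pool) directive later emits the pooled
   words for the pc-relative load to reference.  */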
2725 add_to_lit_pool (void)
2727 literal_pool
* pool
;
2730 pool
= find_or_make_literal_pool ();
2732 /* Check if this literal value is already in the pool. */
2733 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2735 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2736 && (inst
.reloc
.exp
.X_op
== O_constant
)
2737 && (pool
->literals
[entry
].X_add_number
2738 == inst
.reloc
.exp
.X_add_number
)
2739 && (pool
->literals
[entry
].X_unsigned
2740 == inst
.reloc
.exp
.X_unsigned
))
2743 if ((pool
->literals
[entry
].X_op
== inst
.reloc
.exp
.X_op
)
2744 && (inst
.reloc
.exp
.X_op
== O_symbol
)
2745 && (pool
->literals
[entry
].X_add_number
2746 == inst
.reloc
.exp
.X_add_number
)
2747 && (pool
->literals
[entry
].X_add_symbol
2748 == inst
.reloc
.exp
.X_add_symbol
)
2749 && (pool
->literals
[entry
].X_op_symbol
2750 == inst
.reloc
.exp
.X_op_symbol
))
2754 /* Do we need to create a new entry? */
2755 if (entry
== pool
->next_free_entry
)
2757 if (entry
>= MAX_LITERAL_POOL_SIZE
)
2759 inst
.error
= _("literal pool overflow");
2763 pool
->literals
[entry
] = inst
.reloc
.exp
;
2764 pool
->next_free_entry
+= 1;
2767 inst
.reloc
.exp
.X_op
= O_symbol
;
2768 inst
.reloc
.exp
.X_add_number
= ((int) entry
) * 4;
2769 inst
.reloc
.exp
.X_add_symbol
= pool
->symbol
;
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
               const char * name,     /* It is copied, the caller can modify.  */
               segT         segment,  /* Segment identifier (SEG_<something>).  */
               valueT       valu,     /* Symbol value.  */
               fragS *      frag)     /* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
}
2827 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
2830 literal_pool
* pool
;
2833 pool
= find_literal_pool ();
2835 || pool
->symbol
== NULL
2836 || pool
->next_free_entry
== 0)
2839 mapping_state (MAP_DATA
);
  /* Align the pool, since word accesses will be emitted from it.
     Only make a frag if we have to.  */
2844 frag_align (2, 0, 0);
2846 record_alignment (now_seg
, 2);
2848 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
2850 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
2851 (valueT
) frag_now_fix (), frag_now
);
2852 symbol_table_insert (pool
->symbol
);
2854 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
2856 #if defined OBJ_COFF || defined OBJ_ELF
2857 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
2860 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
2861 /* First output the expression in the instruction to the pool. */
2862 emit_expr (&(pool
->literals
[entry
]), 4); /* .word */
2864 /* Mark the pool as empty. */
2865 pool
->next_free_entry
= 0;
2866 pool
->symbol
= NULL
;
2870 /* Forward declarations for functions below, in the MD interface
2872 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
2873 static valueT
create_unwind_entry (int);
2874 static void start_unwind_section (const segT
, int);
2875 static void add_unwind_opcode (valueT
, int);
2876 static void flush_pending_unwind (void);
2878 /* Directives: Data. */
2881 s_arm_elf_cons (int nbytes
)
2885 #ifdef md_flush_pending_output
2886 md_flush_pending_output ();
2889 if (is_it_end_of_statement ())
2891 demand_empty_rest_of_line ();
2895 #ifdef md_cons_align
2896 md_cons_align (nbytes
);
2899 mapping_state (MAP_DATA
);
2903 char *base
= input_line_pointer
;
2907 if (exp
.X_op
!= O_symbol
)
2908 emit_expr (&exp
, (unsigned int) nbytes
);
2911 char *before_reloc
= input_line_pointer
;
2912 reloc
= parse_reloc (&input_line_pointer
);
2915 as_bad (_("unrecognized relocation suffix"));
2916 ignore_rest_of_line ();
2919 else if (reloc
== BFD_RELOC_UNUSED
)
2920 emit_expr (&exp
, (unsigned int) nbytes
);
2923 reloc_howto_type
*howto
= bfd_reloc_type_lookup (stdoutput
, reloc
);
2924 int size
= bfd_get_reloc_size (howto
);
2926 if (reloc
== BFD_RELOC_ARM_PLT32
)
2928 as_bad (_("(plt) is only valid on branch targets"));
2929 reloc
= BFD_RELOC_UNUSED
;
2934 as_bad (_("%s relocations do not fit in %d bytes"),
2935 howto
->name
, nbytes
);
2938 /* We've parsed an expression stopping at O_symbol.
2939 But there may be more expression left now that we
2940 have parsed the relocation marker. Parse it again.
2941 XXX Surely there is a cleaner way to do this. */
2942 char *p
= input_line_pointer
;
2944 char *save_buf
= alloca (input_line_pointer
- base
);
2945 memcpy (save_buf
, base
, input_line_pointer
- base
);
2946 memmove (base
+ (input_line_pointer
- before_reloc
),
2947 base
, before_reloc
- base
);
2949 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
2951 memcpy (base
, save_buf
, p
- base
);
2953 offset
= nbytes
- size
;
2954 p
= frag_more ((int) nbytes
);
2955 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
2956 size
, &exp
, 0, reloc
);
2961 while (*input_line_pointer
++ == ',');
2963 /* Put terminator back into stream. */
2964 input_line_pointer
--;
2965 demand_empty_rest_of_line ();
2969 /* Parse a .rel31 directive. */
2972 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
2979 if (*input_line_pointer
== '1')
2980 highbit
= 0x80000000;
2981 else if (*input_line_pointer
!= '0')
2982 as_bad (_("expected 0 or 1"));
2984 input_line_pointer
++;
2985 if (*input_line_pointer
!= ',')
2986 as_bad (_("missing comma"));
2987 input_line_pointer
++;
2989 #ifdef md_flush_pending_output
2990 md_flush_pending_output ();
2993 #ifdef md_cons_align
2997 mapping_state (MAP_DATA
);
3002 md_number_to_chars (p
, highbit
, 4);
3003 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3004 BFD_RELOC_ARM_PREL31
);
3006 demand_empty_rest_of_line ();
3009 /* Directives: AEABI stack-unwind tables. */
3011 /* Parse an unwind_fnstart directive. Simply records the current location. */
3014 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3016 demand_empty_rest_of_line ();
3017 /* Mark the start of the function. */
3018 unwind
.proc_start
= expr_build_dot ();
3020 /* Reset the rest of the unwind info. */
3021 unwind
.opcode_count
= 0;
3022 unwind
.table_entry
= NULL
;
3023 unwind
.personality_routine
= NULL
;
3024 unwind
.personality_index
= -1;
3025 unwind
.frame_size
= 0;
3026 unwind
.fp_offset
= 0;
3029 unwind
.sp_restored
= 0;
3033 /* Parse a handlerdata directive. Creates the exception handling table entry
3034 for the function. */
3037 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3039 demand_empty_rest_of_line ();
3040 if (unwind
.table_entry
)
3041 as_bad (_("dupicate .handlerdata directive"));
3043 create_unwind_entry (1);
3046 /* Parse an unwind_fnend directive. Generates the index table entry. */
3049 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3055 demand_empty_rest_of_line ();
3057 /* Add eh table entry. */
3058 if (unwind
.table_entry
== NULL
)
3059 val
= create_unwind_entry (0);
3063 /* Add index table entry. This is two words. */
3064 start_unwind_section (unwind
.saved_seg
, 1);
3065 frag_align (2, 0, 0);
3066 record_alignment (now_seg
, 2);
3068 ptr
= frag_more (8);
3069 where
= frag_now_fix () - 8;
3071 /* Self relative offset of the function start. */
3072 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3073 BFD_RELOC_ARM_PREL31
);
3075 /* Indicate dependency on EHABI-defined personality routines to the
3076 linker, if it hasn't been done already. */
3077 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3078 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3080 static const char *const name
[] = {
3081 "__aeabi_unwind_cpp_pr0",
3082 "__aeabi_unwind_cpp_pr1",
3083 "__aeabi_unwind_cpp_pr2"
3085 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3086 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3087 marked_pr_dependency
|= 1 << unwind
.personality_index
;
3088 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3089 = marked_pr_dependency
;
3093 /* Inline exception table entry. */
3094 md_number_to_chars (ptr
+ 4, val
, 4);
3096 /* Self relative offset of the table entry. */
3097 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3098 BFD_RELOC_ARM_PREL31
);
3100 /* Restore the original section. */
3101 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3105 /* Parse an unwind_cantunwind directive. */
3108 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3110 demand_empty_rest_of_line ();
3111 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3112 as_bad (_("personality routine specified for cantunwind frame"));
3114 unwind
.personality_index
= -2;
3118 /* Parse a personalityindex directive. */
3121 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3125 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3126 as_bad (_("duplicate .personalityindex directive"));
3130 if (exp
.X_op
!= O_constant
3131 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3133 as_bad (_("bad personality routine number"));
3134 ignore_rest_of_line ();
3138 unwind
.personality_index
= exp
.X_add_number
;
3140 demand_empty_rest_of_line ();
3144 /* Parse a personality directive. */
3147 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3151 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3152 as_bad (_("duplicate .personality directive"));
3154 name
= input_line_pointer
;
3155 c
= get_symbol_end ();
3156 p
= input_line_pointer
;
3157 unwind
.personality_routine
= symbol_find_or_make (name
);
3159 demand_empty_rest_of_line ();
3163 /* Parse a directive saving core registers. */
3166 s_arm_unwind_save_core (void)
3172 range
= parse_reg_list (&input_line_pointer
);
3175 as_bad (_("expected register list"));
3176 ignore_rest_of_line ();
3180 demand_empty_rest_of_line ();
3182 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3183 into .unwind_save {..., sp...}. We aren't bothered about the value of
3184 ip because it is clobbered by calls. */
3185 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3186 && (range
& 0x3000) == 0x1000)
3188 unwind
.opcode_count
--;
3189 unwind
.sp_restored
= 0;
3190 range
= (range
| 0x2000) & ~0x1000;
3191 unwind
.pending_offset
= 0;
3197 /* See if we can use the short opcodes. These pop a block of up to 8
3198 registers starting with r4, plus maybe r14. */
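  /* For example (illustrative only, not from the original source),
     ".save {r4, r5, r6, lr}" matches the short form below and is encoded
     as the single unwind opcode 0xaa (0xa8 | (3 - 1)); a register list
     that does not start at r4 falls back to the long 0x8000-form.  */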
3199 for (n
= 0; n
< 8; n
++)
3201 /* Break at the first non-saved register. */
3202 if ((range
& (1 << (n
+ 4))) == 0)
3205 /* See if there are any other bits set. */
3206 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
3208 /* Use the long form. */
3209 op
= 0x8000 | ((range
>> 4) & 0xfff);
3210 add_unwind_opcode (op
, 2);
3214 /* Use the short form. */
3216 op
= 0xa8; /* Pop r14. */
3218 op
= 0xa0; /* Do not pop r14. */
3220 add_unwind_opcode (op
, 1);
3227 op
= 0xb100 | (range
& 0xf);
3228 add_unwind_opcode (op
, 2);
3231 /* Record the number of bytes pushed. */
3232 for (n
= 0; n
< 16; n
++)
3234 if (range
& (1 << n
))
3235 unwind
.frame_size
+= 4;
3240 /* Parse a directive saving FPA registers. */
3243 s_arm_unwind_save_fpa (int reg
)
3249 /* Get Number of registers to transfer. */
3250 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3253 exp
.X_op
= O_illegal
;
3255 if (exp
.X_op
!= O_constant
)
3257 as_bad (_("expected , <constant>"));
3258 ignore_rest_of_line ();
3262 num_regs
= exp
.X_add_number
;
3264 if (num_regs
< 1 || num_regs
> 4)
3266 as_bad (_("number of registers must be in the range [1:4]"));
3267 ignore_rest_of_line ();
3271 demand_empty_rest_of_line ();
3276 op
= 0xb4 | (num_regs
- 1);
3277 add_unwind_opcode (op
, 1);
3282 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3283 add_unwind_opcode (op
, 2);
3285 unwind
.frame_size
+= num_regs
* 12;
3289 /* Parse a directive saving VFP registers for ARMv6 and above. */
3292 s_arm_unwind_save_vfp_armv6 (void)
3297 int num_vfpv3_regs
= 0;
3298 int num_regs_below_16
;
3300 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
);
3303 as_bad (_("expected register list"));
3304 ignore_rest_of_line ();
3308 demand_empty_rest_of_line ();
3310 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3311 than FSTMX/FLDMX-style ones). */
3313 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3315 num_vfpv3_regs
= count
;
3316 else if (start
+ count
> 16)
3317 num_vfpv3_regs
= start
+ count
- 16;
3319 if (num_vfpv3_regs
> 0)
3321 int start_offset
= start
> 16 ? start
- 16 : 0;
3322 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
3323 add_unwind_opcode (op
, 2);
3326 /* Generate opcode for registers numbered in the range 0 .. 15. */
3327 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
3328 assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
3329 if (num_regs_below_16
> 0)
3331 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
3332 add_unwind_opcode (op
, 2);
3335 unwind
.frame_size
+= count
* 8;
3339 /* Parse a directive saving VFP registers for pre-ARMv6. */
3342 s_arm_unwind_save_vfp (void)
3348 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3351 as_bad (_("expected register list"));
3352 ignore_rest_of_line ();
3356 demand_empty_rest_of_line ();
3361 op
= 0xb8 | (count
- 1);
3362 add_unwind_opcode (op
, 1);
3367 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3368 add_unwind_opcode (op
, 2);
3370 unwind
.frame_size
+= count
* 8 + 4;
3374 /* Parse a directive saving iWMMXt data registers. */
3377 s_arm_unwind_save_mmxwr (void)
3385 if (*input_line_pointer
== '{')
3386 input_line_pointer
++;
3390 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3394 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3399 as_tsktsk (_("register list not in ascending order"));
3402 if (*input_line_pointer
== '-')
3404 input_line_pointer
++;
3405 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3408 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3411 else if (reg
>= hi_reg
)
3413 as_bad (_("bad register range"));
3416 for (; reg
< hi_reg
; reg
++)
3420 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3422 if (*input_line_pointer
== '}')
3423 input_line_pointer
++;
3425 demand_empty_rest_of_line ();
  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
3429 flush_pending_unwind ();
3431 for (i
= 0; i
< 16; i
++)
3433 if (mask
& (1 << i
))
3434 unwind
.frame_size
+= 8;
  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
3440 if (unwind
.opcode_count
> 0)
3442 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3443 if ((i
& 0xf8) == 0xc0)
3446 /* Only merge if the blocks are contiguous. */
3449 if ((mask
& 0xfe00) == (1 << 9))
3451 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3452 unwind
.opcode_count
--;
3455 else if (i
== 6 && unwind
.opcode_count
>= 2)
3457 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3461 op
= 0xffff << (reg
- 1);
3463 && ((mask
& op
) == (1u << (reg
- 1))))
3465 op
= (1 << (reg
+ i
+ 1)) - 1;
3466 op
&= ~((1 << reg
) - 1);
3468 unwind
.opcode_count
-= 2;
  /* We want to generate opcodes in the order the registers have been
     saved, i.e. descending order.  */
3477 for (reg
= 15; reg
>= -1; reg
--)
3479 /* Save registers in blocks. */
3481 || !(mask
& (1 << reg
)))
          /* We found an unsaved reg.  Generate opcodes to save the
             preceding block.  */
3490 op
= 0xc0 | (hi_reg
- 10);
3491 add_unwind_opcode (op
, 1);
3496 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3497 add_unwind_opcode (op
, 2);
3506 ignore_rest_of_line ();
3510 s_arm_unwind_save_mmxwcg (void)
3517 if (*input_line_pointer
== '{')
3518 input_line_pointer
++;
3522 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3526 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3532 as_tsktsk (_("register list not in ascending order"));
3535 if (*input_line_pointer
== '-')
3537 input_line_pointer
++;
3538 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3541 as_bad (_(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3544 else if (reg
>= hi_reg
)
3546 as_bad (_("bad register range"));
3549 for (; reg
< hi_reg
; reg
++)
3553 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3555 if (*input_line_pointer
== '}')
3556 input_line_pointer
++;
3558 demand_empty_rest_of_line ();
  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
3562 flush_pending_unwind ();
3564 for (reg
= 0; reg
< 16; reg
++)
3566 if (mask
& (1 << reg
))
3567 unwind
.frame_size
+= 4;
3570 add_unwind_opcode (op
, 2);
3573 ignore_rest_of_line ();
3577 /* Parse an unwind_save directive.
3578 If the argument is non-zero, this is a .vsave directive. */
3581 s_arm_unwind_save (int arch_v6
)
3584 struct reg_entry
*reg
;
3585 bfd_boolean had_brace
= FALSE
;
3587 /* Figure out what sort of save we have. */
3588 peek
= input_line_pointer
;
3596 reg
= arm_reg_parse_multi (&peek
);
3600 as_bad (_("register expected"));
3601 ignore_rest_of_line ();
3610 as_bad (_("FPA .unwind_save does not take a register list"));
3611 ignore_rest_of_line ();
3614 s_arm_unwind_save_fpa (reg
->number
);
3617 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
3620 s_arm_unwind_save_vfp_armv6 ();
3622 s_arm_unwind_save_vfp ();
3624 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
3625 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
3628 as_bad (_(".unwind_save does not support this kind of register"));
3629 ignore_rest_of_line ();
3634 /* Parse an unwind_movsp directive. */
3637 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
3643 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3646 as_bad (_(reg_expected_msgs
[REG_TYPE_RN
]));
3647 ignore_rest_of_line ();
3651 /* Optional constant. */
3652 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3654 if (immediate_for_directive (&offset
) == FAIL
)
3660 demand_empty_rest_of_line ();
3662 if (reg
== REG_SP
|| reg
== REG_PC
)
3664 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3668 if (unwind
.fp_reg
!= REG_SP
)
3669 as_bad (_("unexpected .unwind_movsp directive"));
3671 /* Generate opcode to restore the value. */
3673 add_unwind_opcode (op
, 1);
3675 /* Record the information for later. */
3676 unwind
.fp_reg
= reg
;
3677 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3678 unwind
.sp_restored
= 1;
3681 /* Parse an unwind_pad directive. */
3684 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
3688 if (immediate_for_directive (&offset
) == FAIL
)
3693 as_bad (_("stack increment must be multiple of 4"));
3694 ignore_rest_of_line ();
3698 /* Don't generate any opcodes, just record the details for later. */
3699 unwind
.frame_size
+= offset
;
3700 unwind
.pending_offset
+= offset
;
3702 demand_empty_rest_of_line ();
3705 /* Parse an unwind_setfp directive. */
3708 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
3714 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3715 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3718 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
3720 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
3722 as_bad (_("expected <reg>, <reg>"));
3723 ignore_rest_of_line ();
3727 /* Optional constant. */
3728 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3730 if (immediate_for_directive (&offset
) == FAIL
)
3736 demand_empty_rest_of_line ();
3738 if (sp_reg
!= 13 && sp_reg
!= unwind
.fp_reg
)
3740 as_bad (_("register must be either sp or set by a previous"
3741 "unwind_movsp directive"));
3745 /* Don't generate any opcodes, just record the information for later. */
3746 unwind
.fp_reg
= fp_reg
;
3749 unwind
.fp_offset
= unwind
.frame_size
- offset
;
3751 unwind
.fp_offset
-= offset
;
3754 /* Parse an unwind_raw directive. */
3757 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
3760 /* This is an arbitrary limit. */
3761 unsigned char op
[16];
3765 if (exp
.X_op
== O_constant
3766 && skip_past_comma (&input_line_pointer
) != FAIL
)
3768 unwind
.frame_size
+= exp
.X_add_number
;
3772 exp
.X_op
= O_illegal
;
3774 if (exp
.X_op
!= O_constant
)
3776 as_bad (_("expected <offset>, <opcode>"));
3777 ignore_rest_of_line ();
3783 /* Parse the opcode. */
3788 as_bad (_("unwind opcode too long"));
3789 ignore_rest_of_line ();
3791 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
3793 as_bad (_("invalid unwind opcode"));
3794 ignore_rest_of_line ();
3797 op
[count
++] = exp
.X_add_number
;
3799 /* Parse the next byte. */
3800 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3806 /* Add the opcode bytes in reverse order. */
3808 add_unwind_opcode (op
[count
], 1);
3810 demand_empty_rest_of_line ();
3814 /* Parse a .eabi_attribute directive. */
3817 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
3820 bfd_boolean is_string
;
3827 if (exp
.X_op
!= O_constant
)
3830 tag
= exp
.X_add_number
;
3831 if (tag
== 4 || tag
== 5 || tag
== 32 || (tag
> 32 && (tag
& 1) != 0))
3836 if (skip_past_comma (&input_line_pointer
) == FAIL
)
3838 if (tag
== 32 || !is_string
)
3841 if (exp
.X_op
!= O_constant
)
3843 as_bad (_("expected numeric constant"));
3844 ignore_rest_of_line ();
3847 i
= exp
.X_add_number
;
3849 if (tag
== Tag_compatibility
3850 && skip_past_comma (&input_line_pointer
) == FAIL
)
3852 as_bad (_("expected comma"));
3853 ignore_rest_of_line ();
3858 skip_whitespace(input_line_pointer
);
3859 if (*input_line_pointer
!= '"')
3861 input_line_pointer
++;
3862 s
= input_line_pointer
;
3863 while (*input_line_pointer
&& *input_line_pointer
!= '"')
3864 input_line_pointer
++;
3865 if (*input_line_pointer
!= '"')
3867 saved_char
= *input_line_pointer
;
3868 *input_line_pointer
= 0;
3876 if (tag
== Tag_compatibility
)
3877 elf32_arm_add_eabi_attr_compat (stdoutput
, i
, s
);
3879 elf32_arm_add_eabi_attr_string (stdoutput
, tag
, s
);
3881 elf32_arm_add_eabi_attr_int (stdoutput
, tag
, i
);
3885 *input_line_pointer
= saved_char
;
3886 input_line_pointer
++;
3888 demand_empty_rest_of_line ();
3891 as_bad (_("bad string constant"));
3892 ignore_rest_of_line ();
3895 as_bad (_("expected <tag> , <value>"));
3896 ignore_rest_of_line ();
3898 #endif /* OBJ_ELF */
3900 static void s_arm_arch (int);
3901 static void s_arm_object_arch (int);
3902 static void s_arm_cpu (int);
3903 static void s_arm_fpu (int);
3908 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
3915 if (exp
.X_op
== O_symbol
)
3916 exp
.X_op
= O_secrel
;
3918 emit_expr (&exp
, 4);
3920 while (*input_line_pointer
++ == ',');
3922 input_line_pointer
--;
3923 demand_empty_rest_of_line ();
3927 /* This table describes all the machine specific pseudo-ops the assembler
3928 has to support. The fields are:
3929 pseudo-op name without dot
3930 function to call to execute this pseudo-op
3931 Integer arg to pass to the function. */
3933 const pseudo_typeS md_pseudo_table
[] =
3935 /* Never called because '.req' does not start a line. */
3936 { "req", s_req
, 0 },
3937 /* Following two are likewise never called. */
3940 { "unreq", s_unreq
, 0 },
3941 { "bss", s_bss
, 0 },
3942 { "align", s_align
, 0 },
3943 { "arm", s_arm
, 0 },
3944 { "thumb", s_thumb
, 0 },
3945 { "code", s_code
, 0 },
3946 { "force_thumb", s_force_thumb
, 0 },
3947 { "thumb_func", s_thumb_func
, 0 },
3948 { "thumb_set", s_thumb_set
, 0 },
3949 { "even", s_even
, 0 },
3950 { "ltorg", s_ltorg
, 0 },
3951 { "pool", s_ltorg
, 0 },
3952 { "syntax", s_syntax
, 0 },
3953 { "cpu", s_arm_cpu
, 0 },
3954 { "arch", s_arm_arch
, 0 },
3955 { "object_arch", s_arm_object_arch
, 0 },
3956 { "fpu", s_arm_fpu
, 0 },
3958 { "word", s_arm_elf_cons
, 4 },
3959 { "long", s_arm_elf_cons
, 4 },
3960 { "rel31", s_arm_rel31
, 0 },
3961 { "fnstart", s_arm_unwind_fnstart
, 0 },
3962 { "fnend", s_arm_unwind_fnend
, 0 },
3963 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
3964 { "personality", s_arm_unwind_personality
, 0 },
3965 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
3966 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
3967 { "save", s_arm_unwind_save
, 0 },
3968 { "vsave", s_arm_unwind_save
, 1 },
3969 { "movsp", s_arm_unwind_movsp
, 0 },
3970 { "pad", s_arm_unwind_pad
, 0 },
3971 { "setfp", s_arm_unwind_setfp
, 0 },
3972 { "unwind_raw", s_arm_unwind_raw
, 0 },
3973 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
3977 /* These are used for dwarf. */
3981 /* These are used for dwarf2. */
3982 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
3983 { "loc", dwarf2_directive_loc
, 0 },
3984 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
3986 { "extend", float_cons
, 'x' },
3987 { "ldouble", float_cons
, 'x' },
3988 { "packed", float_cons
, 'p' },
3990 {"secrel32", pe_directive_secrel
, 0},
3995 /* Parser functions used exclusively in instruction operands. */
/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

static int
parse_immediate (char **str, int *val, int min, int max,
                 bfd_boolean prefix_opt)
{
  expressionS exp;

  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
4025 /* Less-generic immediate-value read function with the possibility of loading a
4026 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4027 instructions. Puts the result directly in inst.operands[i]. */
4030 parse_big_immediate (char **str
, int i
)
4035 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4037 if (exp
.X_op
== O_constant
)
4039 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4040 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4041 O_constant. We have to be careful not to break compilation for
4042 32-bit X_add_number, though. */
4043 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4045 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4046 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4047 inst
.operands
[i
].regisimm
= 1;
4050 else if (exp
.X_op
== O_big
4051 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
4052 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
4054 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4055 /* Bignums have their least significant bits in
4056 generic_bignum[0]. Make sure we put 32 bits in imm and
4057 32 bits in reg, in a (hopefully) portable way. */
4058 assert (parts
!= 0);
4059 inst
.operands
[i
].imm
= 0;
4060 for (j
= 0; j
< parts
; j
++, idx
++)
4061 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4062 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4063 inst
.operands
[i
].reg
= 0;
4064 for (j
= 0; j
< parts
; j
++, idx
++)
4065 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4066 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4067 inst
.operands
[i
].regisimm
= 1;
4077 /* Returns the pseudo-register number of an FPA immediate constant,
4078 or FAIL if there isn't a valid constant here. */
4081 parse_fpa_immediate (char ** str
)
4083 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
  /* First try to match exact strings; this is to guarantee
     that some formats will work even for cross assembly.  */
4092 for (i
= 0; fp_const
[i
]; i
++)
4094 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4098 *str
+= strlen (fp_const
[i
]);
4099 if (is_end_of_line
[(unsigned char) **str
])
4105 /* Just because we didn't get a match doesn't mean that the constant
4106 isn't valid, just that it is in a format that we don't
4107 automatically recognize. Try parsing it with the standard
4108 expression routines. */
4110 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4112 /* Look for a raw floating point number. */
4113 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4114 && is_end_of_line
[(unsigned char) *save_in
])
4116 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4118 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4120 if (words
[j
] != fp_values
[i
][j
])
4124 if (j
== MAX_LITTLENUMS
)
  /* Try to parse a more complex expression; this will probably fail
     unless the code uses a floating point prefix (e.g. "0f").  */
4134 save_in
= input_line_pointer
;
4135 input_line_pointer
= *str
;
4136 if (expression (&exp
) == absolute_section
4137 && exp
.X_op
== O_big
4138 && exp
.X_add_number
< 0)
4140 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4142 if (gen_to_words (words
, 5, (long) 15) == 0)
4144 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4146 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4148 if (words
[j
] != fp_values
[i
][j
])
4152 if (j
== MAX_LITTLENUMS
)
4154 *str
= input_line_pointer
;
4155 input_line_pointer
= save_in
;
4162 *str
= input_line_pointer
;
4163 input_line_pointer
= save_in
;
4164 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
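/* For illustration (not from the original source): 2.0f (0x40000000) and
   1.0f (0x3f800000) both pass the test above, since only the sign, the
   exponent pattern checked against BS and the top mantissa bits may be
   nonzero; a value such as 0.1f (0x3dcccccd) is rejected because its
   low-order mantissa bits are set.  */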
4178 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4179 0baBbbbbbc defgh000 00000000 00000000.
4180 The minus-zero case needs special handling, since it can't be encoded in the
4181 "quarter-precision" float format, but can nonetheless be loaded as an integer
4185 parse_qfloat_immediate (char **ccp
, int *immed
)
4188 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4190 skip_past_char (&str
, '#');
4192 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4194 unsigned fpword
= 0;
4197 /* Our FP word must be 32 bits (single-precision FP). */
4198 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4200 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4204 if (is_quarter_float (fpword
) || fpword
== 0x80000000)
4217 /* Shift operands. */
4220 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4223 struct asm_shift_name
4226 enum shift_kind kind
;
4229 /* Third argument to parse_shift. */
4230 enum parse_shift_mode
4232 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4233 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4234 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4235 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4236 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4239 /* Parse a <shift> specifier on an ARM data processing instruction.
4240 This has three forms:
4242 (LSL|LSR|ASL|ASR|ROR) Rs
4243 (LSL|LSR|ASL|ASR|ROR) #imm
4246 Note that ASL is assimilated to LSL in the instruction encoding, and
4247 RRX to ROR #0 (which cannot be written as such). */
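/* Illustrative examples (not from the original source): the shift appears
   as the last operand of a data-processing instruction, e.g.

       add  r0, r1, r2, LSL #2    @ register shifted by an immediate
       mov  r0, r1, LSR r3        @ register shifted by a register
       mov  r0, r1, RRX           @ rotate right with extend, no amount

   ASL is accepted as a synonym for LSL.  */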
4250 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4252 const struct asm_shift_name
*shift_name
;
4253 enum shift_kind shift
;
4258 for (p
= *str
; ISALPHA (*p
); p
++)
4263 inst
.error
= _("shift expression expected");
4267 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4269 if (shift_name
== NULL
)
4271 inst
.error
= _("shift expression expected");
4275 shift
= shift_name
->kind
;
4279 case NO_SHIFT_RESTRICT
:
4280 case SHIFT_IMMEDIATE
: break;
4282 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4283 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4285 inst
.error
= _("'LSL' or 'ASR' required");
4290 case SHIFT_LSL_IMMEDIATE
:
4291 if (shift
!= SHIFT_LSL
)
4293 inst
.error
= _("'LSL' required");
4298 case SHIFT_ASR_IMMEDIATE
:
4299 if (shift
!= SHIFT_ASR
)
4301 inst
.error
= _("'ASR' required");
4309 if (shift
!= SHIFT_RRX
)
4311 /* Whitespace can appear here if the next thing is a bare digit. */
4312 skip_whitespace (p
);
4314 if (mode
== NO_SHIFT_RESTRICT
4315 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4317 inst
.operands
[i
].imm
= reg
;
4318 inst
.operands
[i
].immisreg
= 1;
4320 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4323 inst
.operands
[i
].shift_kind
= shift
;
4324 inst
.operands
[i
].shifted
= 1;
4329 /* Parse a <shifter_operand> for an ARM data processing instruction:
4332 #<immediate>, <rotate>
4336 where <shift> is defined by parse_shift above, and <rotate> is a
4337 multiple of 2 between 0 and 30. Validation of immediate operands
4338 is deferred to md_apply_fix. */
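/* A worked example (illustrative, not from the original source): the
   operand "#0xff, 8" denotes the 8-bit value 0xff rotated right by 8,
   i.e. the 32-bit constant 0xff000000; the code below pre-rotates the
   value so that md_apply_fix can recover the 8-bit immediate and the
   rotation field.  */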
4341 parse_shifter_operand (char **str
, int i
)
4346 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4348 inst
.operands
[i
].reg
= value
;
4349 inst
.operands
[i
].isreg
= 1;
4351 /* parse_shift will override this if appropriate */
4352 inst
.reloc
.exp
.X_op
= O_constant
;
4353 inst
.reloc
.exp
.X_add_number
= 0;
4355 if (skip_past_comma (str
) == FAIL
)
4358 /* Shift operation on register. */
4359 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4362 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4365 if (skip_past_comma (str
) == SUCCESS
)
4367 /* #x, y -- ie explicit rotation by Y. */
4368 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4371 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4373 inst
.error
= _("constant expression expected");
4377 value
= expr
.X_add_number
;
4378 if (value
< 0 || value
> 30 || value
% 2 != 0)
4380 inst
.error
= _("invalid rotation");
4383 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4385 inst
.error
= _("invalid constant");
4389 /* Convert to decoded value. md_apply_fix will put it back. */
4390 inst
.reloc
.exp
.X_add_number
4391 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4392 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4395 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4396 inst
.reloc
.pc_rel
= 0;
4400 /* Group relocation information. Each entry in the table contains the
4401 textual name of the relocation as may appear in assembler source
4402 and must end with a colon.
4403 Along with this textual name are the relocation codes to be used if
4404 the corresponding instruction is an ALU instruction (ADD or SUB only),
4405 an LDR, an LDRS, or an LDC. */
4407 struct group_reloc_table_entry
4418 /* Varieties of non-ALU group relocation. */
4425 static struct group_reloc_table_entry group_reloc_table
[] =
4426 { /* Program counter relative: */
4428 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4433 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4434 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4435 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4436 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4438 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4443 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4444 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4445 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4446 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4448 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4449 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4450 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4451 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4452 /* Section base relative */
4454 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4459 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4460 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4461 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4462 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4464 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4469 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4470 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4471 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4472 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4474 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4475 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4476 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4477 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
4479 /* Given the address of a pointer pointing to the textual name of a group
4480 relocation as may appear in assembler source, attempt to find its details
4481 in group_reloc_table. The pointer will be updated to the character after
4482 the trailing colon. On failure, FAIL will be returned; SUCCESS
4483 otherwise. On success, *entry will be updated to point at the relevant
4484 group_reloc_table entry. */
4487 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
4490 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
4492 int length
= strlen (group_reloc_table
[i
].name
);
4494 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0 &&
4495 (*str
)[length
] == ':')
4497 *out
= &group_reloc_table
[i
];
4498 *str
+= (length
+ 1);
4506 /* Parse a <shifter_operand> for an ARM data processing instruction
4507 (as for parse_shifter_operand) where group relocations are allowed:
4510 #<immediate>, <rotate>
4511 #:<group_reloc>:<expression>
4515 where <group_reloc> is one of the strings defined in group_reloc_table.
4516 The hashes are optional.
4518 Everything else is as for parse_shifter_operand. */
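/* For example (illustrative only, not from the original source), source
   such as

       add  r0, r0, #:pc_g0_nc:(foo)

   selects the ALU-flavoured group relocation named "pc_g0_nc" from
   group_reloc_table for the expression (foo).  */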
4520 static parse_operand_result
4521 parse_shifter_operand_group_reloc (char **str
, int i
)
4523 /* Determine if we have the sequence of characters #: or just :
4524 coming next. If we do, then we check for a group relocation.
4525 If we don't, punt the whole lot to parse_shifter_operand. */
4527 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4528 || (*str
)[0] == ':')
4530 struct group_reloc_table_entry
*entry
;
4532 if ((*str
)[0] == '#')
4537 /* Try to parse a group relocation. Anything else is an error. */
4538 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4540 inst
.error
= _("unknown group relocation");
4541 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4544 /* We now have the group relocation table entry corresponding to
4545 the name in the assembler source. Next, we parse the expression. */
4546 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4547 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4549 /* Record the relocation type (always the ALU variant here). */
4550 inst
.reloc
.type
= entry
->alu_code
;
4551 assert (inst
.reloc
.type
!= 0);
4553 return PARSE_OPERAND_SUCCESS
;
4556 return parse_shifter_operand (str
, i
) == SUCCESS
4557 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4559 /* Never reached. */
4562 /* Parse all forms of an ARM address expression. Information is written
4563 to inst.operands[i] and/or inst.reloc.
4565 Preindexed addressing (.preind=1):
4567 [Rn, #offset] .reg=Rn .reloc.exp=offset
4568 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4569 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4570 .shift_kind=shift .reloc.exp=shift_imm
4572 These three may have a trailing ! which causes .writeback to be set also.
4574 Postindexed addressing (.postind=1, .writeback=1):
4576 [Rn], #offset .reg=Rn .reloc.exp=offset
4577 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4578 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4579 .shift_kind=shift .reloc.exp=shift_imm
4581 Unindexed addressing (.preind=0, .postind=0):
4583 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4587 [Rn]{!} shorthand for [Rn,#0]{!}
4588 =immediate .isreg=0 .reloc.exp=immediate
4589 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4591 It is the caller's responsibility to check for addressing modes not
4592 supported by the instruction, and to set inst.reloc.type. */
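/* Concrete examples of the forms above (illustrative, not from the
   original source):

       ldr  r0, [r1, #4]           @ preindexed, immediate offset
       ldr  r0, [r1, r2, LSL #2]!  @ preindexed, register offset, writeback
       ldr  r0, [r1], #4           @ postindexed
       ldr  r0, =0x12345678        @ load-constant pseudo op
       ldr  r0, label              @ pc-relative literal load  */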
4594 static parse_operand_result
4595 parse_address_main (char **str
, int i
, int group_relocations
,
4596 group_reloc_type group_type
)
4601 if (skip_past_char (&p
, '[') == FAIL
)
4603 if (skip_past_char (&p
, '=') == FAIL
)
4605 /* bare address - translate to PC-relative offset */
4606 inst
.reloc
.pc_rel
= 1;
4607 inst
.operands
[i
].reg
= REG_PC
;
4608 inst
.operands
[i
].isreg
= 1;
4609 inst
.operands
[i
].preind
= 1;
4611 /* else a load-constant pseudo op, no special treatment needed here */
4613 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4614 return PARSE_OPERAND_FAIL
;
4617 return PARSE_OPERAND_SUCCESS
;
4620 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4622 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4623 return PARSE_OPERAND_FAIL
;
4625 inst
.operands
[i
].reg
= reg
;
4626 inst
.operands
[i
].isreg
= 1;
4628 if (skip_past_comma (&p
) == SUCCESS
)
4630 inst
.operands
[i
].preind
= 1;
4633 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4635 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4637 inst
.operands
[i
].imm
= reg
;
4638 inst
.operands
[i
].immisreg
= 1;
4640 if (skip_past_comma (&p
) == SUCCESS
)
4641 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4642 return PARSE_OPERAND_FAIL
;
4644 else if (skip_past_char (&p
, ':') == SUCCESS
)
4646 /* FIXME: '@' should be used here, but it's filtered out by generic
4647 code before we get to see it here. This may be subject to
4650 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4651 if (exp
.X_op
!= O_constant
)
4653 inst
.error
= _("alignment must be constant");
4654 return PARSE_OPERAND_FAIL
;
4656 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4657 inst
.operands
[i
].immisalign
= 1;
4658 /* Alignments are not pre-indexes. */
4659 inst
.operands
[i
].preind
= 0;
4663 if (inst
.operands
[i
].negative
)
4665 inst
.operands
[i
].negative
= 0;
4669 if (group_relocations
&&
4670 ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
4673 struct group_reloc_table_entry
*entry
;
4675 /* Skip over the #: or : sequence. */
4681 /* Try to parse a group relocation. Anything else is an
4683 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
4685 inst
.error
= _("unknown group relocation");
4686 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4689 /* We now have the group relocation table entry corresponding to
4690 the name in the assembler source. Next, we parse the
4692 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4693 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4695 /* Record the relocation type. */
4699 inst
.reloc
.type
= entry
->ldr_code
;
4703 inst
.reloc
.type
= entry
->ldrs_code
;
4707 inst
.reloc
.type
= entry
->ldc_code
;
4714 if (inst
.reloc
.type
== 0)
4716 inst
.error
= _("this group relocation is not allowed on this instruction");
4717 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4721 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4722 return PARSE_OPERAND_FAIL
;
4726 if (skip_past_char (&p
, ']') == FAIL
)
4728 inst
.error
= _("']' expected");
4729 return PARSE_OPERAND_FAIL
;
4732 if (skip_past_char (&p
, '!') == SUCCESS
)
4733 inst
.operands
[i
].writeback
= 1;
4735 else if (skip_past_comma (&p
) == SUCCESS
)
4737 if (skip_past_char (&p
, '{') == SUCCESS
)
4739 /* [Rn], {expr} - unindexed, with option */
4740 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4741 0, 255, TRUE
) == FAIL
)
4742 return PARSE_OPERAND_FAIL
;
4744 if (skip_past_char (&p
, '}') == FAIL
)
4746 inst
.error
= _("'}' expected at end of 'option' field");
4747 return PARSE_OPERAND_FAIL
;
4749 if (inst
.operands
[i
].preind
)
4751 inst
.error
= _("cannot combine index with option");
4752 return PARSE_OPERAND_FAIL
;
4755 return PARSE_OPERAND_SUCCESS
;
4759 inst
.operands
[i
].postind
= 1;
4760 inst
.operands
[i
].writeback
= 1;
4762 if (inst
.operands
[i
].preind
)
4764 inst
.error
= _("cannot combine pre- and post-indexing");
4765 return PARSE_OPERAND_FAIL
;
4769 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4771 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4773 /* We might be using the immediate for alignment already. If we
4774 are, OR the register number into the low-order bits. */
4775 if (inst
.operands
[i
].immisalign
)
4776 inst
.operands
[i
].imm
|= reg
;
4778 inst
.operands
[i
].imm
= reg
;
4779 inst
.operands
[i
].immisreg
= 1;
4781 if (skip_past_comma (&p
) == SUCCESS
)
4782 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4783 return PARSE_OPERAND_FAIL
;
4787 if (inst
.operands
[i
].negative
)
4789 inst
.operands
[i
].negative
= 0;
4792 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4793 return PARSE_OPERAND_FAIL
;
4798 /* If at this point neither .preind nor .postind is set, we have a
4799 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4800 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4802 inst
.operands
[i
].preind
= 1;
4803 inst
.reloc
.exp
.X_op
= O_constant
;
4804 inst
.reloc
.exp
.X_add_number
= 0;
4807 return PARSE_OPERAND_SUCCESS
;
4811 parse_address (char **str
, int i
)
4813 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
4817 static parse_operand_result
4818 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
4820 return parse_address_main (str
, i
, 1, type
);
4823 /* Parse an operand for a MOVW or MOVT instruction. */
4825 parse_half (char **str
)
4830 skip_past_char (&p
, '#');
4831 if (strncasecmp (p
, ":lower16:", 9) == 0)
4832 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
4833 else if (strncasecmp (p
, ":upper16:", 9) == 0)
4834 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
4836 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
4842 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4845 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
4847 if (inst
.reloc
.exp
.X_op
!= O_constant
)
4849 inst
.error
= _("constant expression expected");
4852 if (inst
.reloc
.exp
.X_add_number
< 0
4853 || inst
.reloc
.exp
.X_add_number
> 0xffff)
4855 inst
.error
= _("immediate value out of range");
4863 /* Miscellaneous. */
4865 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4866 or a bitmask suitable to be or-ed into the ARM msr instruction. */
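/* For example (illustrative only, not from the original source), the
   "CPSR_fc" operand in

       msr  CPSR_fc, r0

   yields a mask selecting the flags and control fields; with no suffix
   the code below defaults to PSR_c | PSR_f.  */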
4868 parse_psr (char **str
)
4871 unsigned long psr_field
;
4872 const struct asm_psr
*psr
;
4875 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4876 feature for ease of use and backwards compatibility. */
4878 if (strncasecmp (p
, "SPSR", 4) == 0)
4879 psr_field
= SPSR_BIT
;
4880 else if (strncasecmp (p
, "CPSR", 4) == 0)
4887 while (ISALNUM (*p
) || *p
== '_');
4889 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4900 /* A suffix follows. */
4906 while (ISALNUM (*p
) || *p
== '_');
4908 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4912 psr_field
|= psr
->field
;
4917 goto error
; /* Garbage after "[CS]PSR". */
4919 psr_field
|= (PSR_c
| PSR_f
);
4925 inst
.error
= _("flag for {c}psr instruction expected");
4929 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4930 value suitable for splatting into the AIF field of the instruction. */
4933 parse_cps_flags (char **str
)
4942 case '\0': case ',':
4945 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4946 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4947 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4950 inst
.error
= _("unrecognized CPS flag");
4955 if (saw_a_flag
== 0)
4957 inst
.error
= _("missing CPS flags");
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */

static int
parse_endian_specifier (char **str)
{
  int little_endian;
  char *s = *str;

  if (strncasecmp (s, "BE", 2))
    little_endian = 1;
  else if (strncasecmp (s, "LE", 2))
    little_endian = 0;
  else
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  if (ISALNUM (s[2]) || s[2] == '_')
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  *str = s + 2;
  return little_endian;
}
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
   value suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  */

static int
parse_ror (char **str)
{
  int rot;
  char *s = *str;

  if (strncasecmp (s, "ROR", 3) == 0)
    s += 3;
  else
    {
      inst.error = _("missing rotation field after comma");
      return FAIL;
    }

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
    return FAIL;

  switch (rot)
    {
    case 0:  *str = s; return 0x0;
    case 8:  *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

    default:
      inst.error = _("rotation can only be 0, 8, 16, or 24");
      return FAIL;
    }
}
/* Parse a conditional code (from conds[] below).  The value returned is in the
   range 0 .. 14, or FAIL.  */

static int
parse_cond (char **str)
{
  char *p, *q;
  const struct asm_cond *c;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  c = hash_find_n (arm_cond_hsh, p, q - p);
  if (!c)
    {
      inst.error = _("condition required");
      return FAIL;
    }

  *str = q;
  return c->value;
}
/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or FAIL.  */

static int
parse_barrier (char **str)
{
  char *p, *q;
  const struct asm_barrier_opt *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
  if (!o)
    return FAIL;

  *str = q;
  return o->value;
}
5071 /* Parse the operands of a table branch instruction. Similar to a memory
5074 parse_tb (char **str
)
5079 if (skip_past_char (&p
, '[') == FAIL
)
5081 inst
.error
= _("'[' expected");
5085 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5087 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5090 inst
.operands
[0].reg
= reg
;
5092 if (skip_past_comma (&p
) == FAIL
)
5094 inst
.error
= _("',' expected");
5098 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5100 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5103 inst
.operands
[0].imm
= reg
;
5105 if (skip_past_comma (&p
) == SUCCESS
)
5107 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5109 if (inst
.reloc
.exp
.X_add_number
!= 1)
5111 inst
.error
= _("invalid shift");
5114 inst
.operands
[0].shifted
= 1;
5117 if (skip_past_char (&p
, ']') == FAIL
)
5119 inst
.error
= _("']' expected");
5126 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5127 information on the types the operands can take and how they are encoded.
5128 Up to four operands may be read; this function handles setting the
5129 ".present" field for each read operand itself.
5130 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5131 else returns FAIL. */
5134 parse_neon_mov (char **str
, int *which_operand
)
5136 int i
= *which_operand
, val
;
5137 enum arm_reg_type rtype
;
5139 struct neon_type_el optype
;
5141 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5143 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5144 inst
.operands
[i
].reg
= val
;
5145 inst
.operands
[i
].isscalar
= 1;
5146 inst
.operands
[i
].vectype
= optype
;
5147 inst
.operands
[i
++].present
= 1;
5149 if (skip_past_comma (&ptr
) == FAIL
)
5152 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5155 inst
.operands
[i
].reg
= val
;
5156 inst
.operands
[i
].isreg
= 1;
5157 inst
.operands
[i
].present
= 1;
5159 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
5162 /* Cases 0, 1, 2, 3, 5 (D only). */
5163 if (skip_past_comma (&ptr
) == FAIL
)
5166 inst
.operands
[i
].reg
= val
;
5167 inst
.operands
[i
].isreg
= 1;
5168 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5169 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5170 inst
.operands
[i
].isvec
= 1;
5171 inst
.operands
[i
].vectype
= optype
;
5172 inst
.operands
[i
++].present
= 1;
5174 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5176 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5177 Case 13: VMOV <Sd>, <Rm> */
5178 inst
.operands
[i
].reg
= val
;
5179 inst
.operands
[i
].isreg
= 1;
5180 inst
.operands
[i
].present
= 1;
5182 if (rtype
== REG_TYPE_NQ
)
5184 first_error (_("can't use Neon quad register here"));
5187 else if (rtype
!= REG_TYPE_VFS
)
5190 if (skip_past_comma (&ptr
) == FAIL
)
5192 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5194 inst
.operands
[i
].reg
= val
;
5195 inst
.operands
[i
].isreg
= 1;
5196 inst
.operands
[i
].present
= 1;
5199 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
5200 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5201 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5202 Case 10: VMOV.F32 <Sd>, #<imm>
5203 Case 11: VMOV.F64 <Dd>, #<imm> */
5205 else if (parse_big_immediate (&ptr
, i
) == SUCCESS
)
5206 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5207 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5209 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
5212 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5213 Case 1: VMOV<c><q> <Dd>, <Dm>
5214 Case 8: VMOV.F32 <Sd>, <Sm>
5215 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5217 inst
.operands
[i
].reg
= val
;
5218 inst
.operands
[i
].isreg
= 1;
5219 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
5220 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5221 inst
.operands
[i
].isvec
= 1;
5222 inst
.operands
[i
].vectype
= optype
;
5223 inst
.operands
[i
].present
= 1;
5225 if (skip_past_comma (&ptr
) == SUCCESS
)
5230 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5233 inst
.operands
[i
].reg
= val
;
5234 inst
.operands
[i
].isreg
= 1;
5235 inst
.operands
[i
++].present
= 1;
5237 if (skip_past_comma (&ptr
) == FAIL
)
5240 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
5243 inst
.operands
[i
].reg
= val
;
5244 inst
.operands
[i
].isreg
= 1;
5245 inst
.operands
[i
++].present
= 1;
5250 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5254 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5257 inst
.operands
[i
].reg
= val
;
5258 inst
.operands
[i
].isreg
= 1;
5259 inst
.operands
[i
++].present
= 1;
5261 if (skip_past_comma (&ptr
) == FAIL
)
5264 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5266 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5267 inst
.operands
[i
].reg
= val
;
5268 inst
.operands
[i
].isscalar
= 1;
5269 inst
.operands
[i
].present
= 1;
5270 inst
.operands
[i
].vectype
= optype
;
5272 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
5274 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5275 inst
.operands
[i
].reg
= val
;
5276 inst
.operands
[i
].isreg
= 1;
5277 inst
.operands
[i
++].present
= 1;
5279 if (skip_past_comma (&ptr
) == FAIL
)
5282 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
5285 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
5289 inst
.operands
[i
].reg
= val
;
5290 inst
.operands
[i
].isreg
= 1;
5291 inst
.operands
[i
].isvec
= 1;
5292 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
5293 inst
.operands
[i
].vectype
= optype
;
5294 inst
.operands
[i
].present
= 1;
5296 if (rtype
== REG_TYPE_VFS
)
5300 if (skip_past_comma (&ptr
) == FAIL
)
5302 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
5305 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
5308 inst
.operands
[i
].reg
= val
;
5309 inst
.operands
[i
].isreg
= 1;
5310 inst
.operands
[i
].isvec
= 1;
5311 inst
.operands
[i
].issingle
= 1;
5312 inst
.operands
[i
].vectype
= optype
;
5313 inst
.operands
[i
].present
= 1;
5316 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
5320 inst
.operands
[i
].reg
= val
;
5321 inst
.operands
[i
].isreg
= 1;
5322 inst
.operands
[i
].isvec
= 1;
5323 inst
.operands
[i
].issingle
= 1;
5324 inst
.operands
[i
].vectype
= optype
;
5325 inst
.operands
[i
++].present
= 1;
5330 first_error (_("parse error"));
5334 /* Successfully parsed the operands. Update args. */
5340 first_error (_("expected comma"));
5344 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,       /* end of line */

  OP_RR,         /* ARM register */
  OP_RRnpc,      /* ARM register, not r15 */
  OP_RRnpcb,     /* ARM register, not r15, in square brackets */
  OP_RRw,        /* ARM register, not r15, optional trailing ! */
  OP_RCP,        /* Coprocessor number */
  OP_RCN,        /* Coprocessor register */
  OP_RF,         /* FPA register */
  OP_RVS,        /* VFP single precision register */
  OP_RVD,        /* VFP double precision register (0..15) */
  OP_RND,        /* Neon double precision register (0..31) */
  OP_RNQ,        /* Neon quad precision register */
  OP_RVSD,       /* VFP single or double precision register */
  OP_RNDQ,       /* Neon double or quad precision register */
  OP_RNSDQ,      /* Neon single, double or quad precision register */
  OP_RNSC,       /* Neon scalar D[X] */
  OP_RVC,        /* VFP control register */
  OP_RMF,        /* Maverick F register */
  OP_RMD,        /* Maverick D register */
  OP_RMFX,       /* Maverick FX register */
  OP_RMDX,       /* Maverick DX register */
  OP_RMAX,       /* Maverick AX register */
  OP_RMDS,       /* Maverick DSPSC register */
  OP_RIWR,       /* iWMMXt wR register */
  OP_RIWC,       /* iWMMXt wC register */
  OP_RIWG,       /* iWMMXt wCG register */
  OP_RXA,        /* XScale accumulator register */

  OP_REGLST,     /* ARM register list */
  OP_VRSLST,     /* VFP single-precision register list */
  OP_VRDLST,     /* VFP double-precision register list */
  OP_VRSDLST,    /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,     /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,    /* Neon element/structure list */

  OP_NILO,       /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,    /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,    /* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,    /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,  /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,   /* Neon D reg, or Neon scalar.  */
  OP_VMOV,       /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb, /* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,  /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,  /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,         /* immediate zero */
  OP_I7,         /* immediate value 0 .. 7 */
  OP_I15,        /*                 0 .. 15 */
  OP_I16,        /*                 1 .. 16 */
  OP_I16z,       /*                 0 .. 16 */
  OP_I31,        /*                 0 .. 31 */
  OP_I31w,       /*                 0 .. 31, optional trailing ! */
  OP_I32,        /*                 1 .. 32 */
  OP_I32z,       /*                 0 .. 32 */
  OP_I63,        /*                 0 .. 63 */
  OP_I63s,       /*               -64 .. 63 */
  OP_I64,        /*                 1 .. 64 */
  OP_I64z,       /*                 0 .. 64 */
  OP_I255,       /*                 0 .. 255 */

  OP_I4b,        /* immediate, prefix optional, 1 .. 4 */
  OP_I7b,        /*                             0 .. 7 */
  OP_I15b,       /*                             0 .. 15 */
  OP_I31b,       /*                             0 .. 31 */

  OP_SH,         /* shifter operand */
  OP_SHG,        /* shifter operand with possible group relocation */
  OP_ADDR,       /* Memory address expression (any mode) */
  OP_ADDRGLDR,   /* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,  /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,   /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,        /* arbitrary expression */
  OP_EXPi,       /* same, with optional immediate prefix */
  OP_EXPr,       /* same, with optional relocation suffix */
  OP_HALF,       /* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,       /* CPS flags */
  OP_ENDI,       /* Endianness specifier */
  OP_PSR,        /* CPSR/SPSR mask for msr */
  OP_COND,       /* conditional code */
  OP_TB,         /* Table branch.  */

  OP_RVC_PSR,    /* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,    /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,   /* ARM register or literal 0 */
  OP_RR_EXr,     /* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,     /* ARM register or expression with imm prefix */
  OP_RF_IF,      /* FPA register or immediate */
  OP_RIWR_RIWC,  /* iWMMXt R or C reg */
  OP_RIWC_RIWG,  /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,       /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,      /*                             0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oIffffb,    /*                             0 .. 65535 */
  OP_oI255c,     /*       curly-brace enclosed, 0 .. 255 */

  OP_oRR,        /* ARM register */
  OP_oRRnpc,     /* ARM register, not the PC */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,     /* Optional single, double or quad precision vector register */
  OP_oSHll,      /* LSL immediate */
  OP_oSHar,      /* ASR immediate */
  OP_oSHllar,    /* LSL or ASR immediate */
  OP_oROR,       /* ROR 0/8/16/24 */
  OP_oBARRIER,   /* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
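/* Illustrative note (added, an assumption rather than text from the original
   source): parse_operands below walks an OP_stop-terminated array of these
   codes, one entry per expected operand.  For example, a pattern equivalent
   to { OP_RRnpc, OP_ADDR, OP_stop } would accept "r0, [r1, #4]", storing the
   register in inst.operands[0] and the parsed address in inst.operands[1].
   Codes at or beyond OP_FIRST_OPTIONAL mark operands that may be omitted.  */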
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
parse_operands (char *str, const unsigned char *pattern)
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
#define po_char_or_fail(chr) do {                              \
  if (skip_past_char (&str, chr) == FAIL)                      \

#define po_reg_or_fail(regtype) do {                           \
  val = arm_typed_reg_parse (&str, regtype, &rtype,            \
                             &inst.operands[i].vectype);       \
      first_error (_(reg_expected_msgs[regtype]));             \
  inst.operands[i].reg = val;                                  \
  inst.operands[i].isreg = 1;                                  \
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);            \
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);         \
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS              \
                            || rtype == REG_TYPE_VFD           \
                            || rtype == REG_TYPE_NQ);          \

#define po_reg_or_goto(regtype, label) do {                    \
  val = arm_typed_reg_parse (&str, regtype, &rtype,            \
                             &inst.operands[i].vectype);       \
  inst.operands[i].reg = val;                                  \
  inst.operands[i].isreg = 1;                                  \
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);            \
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);         \
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS              \
                            || rtype == REG_TYPE_VFD           \
                            || rtype == REG_TYPE_NQ);          \

#define po_imm_or_fail(min, max, popt) do {                    \
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)    \
  inst.operands[i].imm = val;                                  \

#define po_scalar_or_goto(elsz, label) do {                    \
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);  \
  inst.operands[i].reg = val;                                  \
  inst.operands[i].isscalar = 1;                               \

#define po_misc_or_fail(expr) do {                             \

#define po_misc_or_fail_no_backtrack(expr) do {                \
  if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)               \
    backtrack_pos = 0;                                         \
  if (result != PARSE_OPERAND_SUCCESS)                         \
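/* Usage note (added for clarity; the goto targets of these helpers are not
   shown above): each po_* helper is a do { ... } while (0) block so it can
   be used as a single statement inside the operand switch below, e.g.
   "case OP_I31: po_imm_or_fail (0, 31, FALSE); break;".  On a parse failure
   the helpers abandon the current operand and let the shared error handling
   at the end of parse_operands report the problem.  */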
  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
      if (upat[i] >= OP_FIRST_OPTIONAL)
          /* Remember where we are in case we need to backtrack.  */
          assert (!backtrack_pos);
          backtrack_pos = str;
          backtrack_error = inst.error;
          backtrack_index = i;
        po_char_or_fail (',');

        case OP_RR:    po_reg_or_fail (REG_TYPE_RN);     break;
        case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);     break;
        case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);     break;
        case OP_RF:    po_reg_or_fail (REG_TYPE_FN);     break;
        case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);    break;
        case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);    break;
        case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);    break;
        case OP_RVC:   po_reg_or_fail (REG_TYPE_VFC);    break;
        case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);    break;
        case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);    break;
        case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);   break;
        case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);   break;
        case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);   break;
        case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);  break;
        case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);  break;
        case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);  break;
        case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG); break;
        case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE); break;
        case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);     break;
        case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);    break;
        case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);   break;
        case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);   break;
        /* Neon scalar.  Using an element size of 8 means that some invalid
           scalars are accepted here, so deal with those in later code.  */
        case OP_RNSC:  po_scalar_or_goto (8, failure);   break;

        /* WARNING: We can expand to two operands here.  This has the potential
           to totally confuse the backtracking mechanism!  It will be OK at
           least as long as we don't try to use optional args as well, ...  */
          po_reg_or_goto (REG_TYPE_NDQ, try_imm);
          inst.operands[i].present = 1;
          skip_past_comma (&str);
          po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);

          /* Optional register operand was omitted.  Unfortunately, it's in
             operands[i-1] and we need it to be in inst.operands[i].  Fix that
             here (this is a bit grotty).  */
          inst.operands[i] = inst.operands[i-1];
          inst.operands[i-1].present = 0;

          /* There's a possibility of getting a 64-bit immediate here, so
             we need special handling.  */
          if (parse_big_immediate (&str, i) == FAIL)
              inst.error = _("immediate value is out of range");

          po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
          po_imm_or_fail (0, 0, TRUE);

          po_reg_or_goto (REG_TYPE_VFSD, try_imm0);

          po_scalar_or_goto (8, try_rr);
          po_reg_or_fail (REG_TYPE_RN);

          po_scalar_or_goto (8, try_nsdq);
          po_reg_or_fail (REG_TYPE_NSDQ);

          po_scalar_or_goto (8, try_ndq);
          po_reg_or_fail (REG_TYPE_NDQ);

          po_scalar_or_goto (8, try_vfd);
          po_reg_or_fail (REG_TYPE_VFD);
          /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
             not careful then bad things might happen.  */
          po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);

          po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);

          /* There's a possibility of getting a 64-bit immediate here, so
             we need special handling.  */
          if (parse_big_immediate (&str, i) == FAIL)
              inst.error = _("immediate value is out of range");

          po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
          po_imm_or_fail (0, 63, TRUE);

          po_char_or_fail ('[');
          po_reg_or_fail (REG_TYPE_RN);
          po_char_or_fail (']');

          po_reg_or_fail (REG_TYPE_RN);
          if (skip_past_char (&str, '!') == SUCCESS)
            inst.operands[i].writeback = 1;

        case OP_I7:      po_imm_or_fail (  0,      7, FALSE);   break;
        case OP_I15:     po_imm_or_fail (  0,     15, FALSE);   break;
        case OP_I16:     po_imm_or_fail (  1,     16, FALSE);   break;
        case OP_I16z:    po_imm_or_fail (  0,     16, FALSE);   break;
        case OP_I31:     po_imm_or_fail (  0,     31, FALSE);   break;
        case OP_I32:     po_imm_or_fail (  1,     32, FALSE);   break;
        case OP_I32z:    po_imm_or_fail (  0,     32, FALSE);   break;
        case OP_I63s:    po_imm_or_fail (-64,     63, FALSE);   break;
        case OP_I63:     po_imm_or_fail (  0,     63, FALSE);   break;
        case OP_I64:     po_imm_or_fail (  1,     64, FALSE);   break;
        case OP_I64z:    po_imm_or_fail (  0,     64, FALSE);   break;
        case OP_I255:    po_imm_or_fail (  0,    255, FALSE);   break;

        case OP_I4b:     po_imm_or_fail (  1,      4, TRUE);    break;
        case OP_I7b:     po_imm_or_fail (  0,      7, TRUE);    break;
        case OP_I15b:    po_imm_or_fail (  0,     15, TRUE);    break;
        case OP_I31b:    po_imm_or_fail (  0,     31, TRUE);    break;
        case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
        case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);    break;

        /* Immediate variants */
          po_char_or_fail ('{');
          po_imm_or_fail (0, 255, TRUE);
          po_char_or_fail ('}');

          /* The expression parser chokes on a trailing !, so we have
             to find it first and zap it.  */
            while (*s && *s != ',')
              inst.operands[i].writeback = 1;
          po_imm_or_fail (0, 31, TRUE);
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
          po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
          if (inst.reloc.exp.X_op == O_symbol)
              val = parse_reloc (&str);
                  inst.error = _("unrecognized relocation suffix");
              else if (val != BFD_RELOC_UNUSED)
                  inst.operands[i].imm = val;
                  inst.operands[i].hasreloc = 1;

          /* Operand for MOVW or MOVT.  */
          po_misc_or_fail (parse_half (&str));

          /* Register or expression */
        case OP_RR_EXr:   po_reg_or_goto (REG_TYPE_RN, EXPr); break;
        case OP_RR_EXi:   po_reg_or_goto (REG_TYPE_RN, EXPi); break;

          /* Register or immediate */
        case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
        I0:               po_imm_or_fail (0, 0, FALSE);       break;

        case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
          if (!is_immediate_prefix (*str))
          val = parse_fpa_immediate (&str);
          /* FPA immediates are encoded as registers 8-15.
             parse_fpa_immediate has already applied the offset.  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;

        case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
        I32z:             po_imm_or_fail (0, 32, FALSE);          break;

          /* Two kinds of register */
            struct reg_entry *rege = arm_reg_parse_multi (&str);
                || (rege->type != REG_TYPE_MMXWR
                    && rege->type != REG_TYPE_MMXWC
                    && rege->type != REG_TYPE_MMXWCG))
              inst.error = _("iWMMXt data or control register expected");
            inst.operands[i].reg = rege->number;
            inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);

            struct reg_entry *rege = arm_reg_parse_multi (&str);
                || (rege->type != REG_TYPE_MMXWC
                    && rege->type != REG_TYPE_MMXWCG))
              inst.error = _("iWMMXt control register expected");
            inst.operands[i].reg = rege->number;
            inst.operands[i].isreg = 1;
        case OP_CPSF:    val = parse_cps_flags (&str);          break;
        case OP_ENDI:    val = parse_endian_specifier (&str);   break;
        case OP_oROR:    val = parse_ror (&str);                break;
        case OP_PSR:     val = parse_psr (&str);                break;
        case OP_COND:    val = parse_cond (&str);               break;
        case OP_oBARRIER:val = parse_barrier (&str);            break;

          po_reg_or_goto (REG_TYPE_VFC, try_psr);
          inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
          val = parse_psr (&str);

          po_reg_or_goto (REG_TYPE_RN, try_apsr);
          /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS ...  */
          if (strncasecmp (str, "APSR_", 5) == 0)
                case 'c': found = (found & 1) ? 16 : found | 1; break;
                case 'n': found = (found & 2) ? 16 : found | 2; break;
                case 'z': found = (found & 4) ? 16 : found | 4; break;
                case 'v': found = (found & 8) ? 16 : found | 8; break;
                default: found = 16;
          inst.operands[i].isvec = 1;

          po_misc_or_fail (parse_tb (&str));
          /* Register lists */
          val = parse_reg_list (&str);
              inst.operands[1].writeback = 1;

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);

          /* Allow Q registers too.  */
          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
          inst.operands[i].issingle = 1;

          val = parse_vfp_reg_list (&str, &inst.operands[i].reg,

          val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
                                           &inst.operands[i].vectype);

          /* Addressing modes */
          po_misc_or_fail (parse_address (&str, i));

          po_misc_or_fail_no_backtrack (
              parse_address_group_reloc (&str, i, GROUP_LDR));

          po_misc_or_fail_no_backtrack (
              parse_address_group_reloc (&str, i, GROUP_LDRS));

          po_misc_or_fail_no_backtrack (
              parse_address_group_reloc (&str, i, GROUP_LDC));

          po_misc_or_fail (parse_shifter_operand (&str, i));

          po_misc_or_fail_no_backtrack (
              parse_shifter_operand_group_reloc (&str, i));

          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));

          po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));

          po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));

          as_fatal ("unhandled operand code %d", upat[i]);
      /* Various value-based sanity checks and shared operations.  We
         do not signal immediate failures for the register constraints;
         this allows a syntax error to take precedence.  */
          if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
            inst.error = BAD_PC;

          inst.operands[i].imm = val;

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;

      inst.error = BAD_ARGS;

      /* The parse routine should already have set inst.error, but set a
         default here just in case.  */
        inst.error = _("syntax error");

      /* Do not backtrack over a trailing optional argument that
         absorbed some text.  We will only fail again, with the
         'garbage following instruction' error message, which is
         probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
          && upat[i+1] == OP_stop)
            inst.error = _("syntax error");

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
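/* Worked example of the backtracking above (added; the pattern shown is an
   illustrative assumption, not copied from the opcode tables): with a
   pattern such as { OP_RR, OP_oRRnpc, OP_SH, OP_stop }, the input
   "add r0, #10" first fails because "#10" is not a register for the
   optional second operand.  Parsing then restarts at backtrack_pos with
   operands[1] marked absent, so "#10" can match the shifter operand
   instead; the encoder later copies operands[0].reg into operands[1] when
   it sees that the middle operand was omitted.  */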
/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) do {              \

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */
encode_arm_immediate (unsigned int val)
  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7);      /* 12-bit pack: [shift-cnt,const].  */
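/* Worked example (added for clarity, not from the original source):
   0x0003FC00 is 0xFF rotated right by 22 bits, so rotate_left (val, 22)
   yields 0xFF and the function returns 0xFF | (22 << 7) == 0xBFF, i.e.
   imm8 = 0xFF with a rotate field of 11 (rotate right by 22).  A value
   such as 0x101 cannot be reduced to an 8-bit constant by any even
   rotation, so FAIL is returned and the caller must fall back on some
   other strategy (e.g. MVN or a literal-pool load).  */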
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */
encode_thumb32_immediate (unsigned int val)
  for (i = 1; i <= 24; i++)
      if ((val & ~(0xff << i)) == 0)
        return ((val >> i) & 0x7f) | ((32 - i) << 7);

  if (val == ((a << 16) | a))
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);
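/* Illustrative examples (added, not from the original source): Thumb-2
   modified immediates accept a few replicated-byte patterns in addition to
   the shifted-8-bit form handled by the loop above.  For instance
   0x00AB00AB (one byte replicated in both halfwords) and 0xABABABAB (one
   byte replicated four times) are representable, while an arbitrary value
   such as 0x12345678 matches none of the checks and yields FAIL.  */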
/* Encode a VFP SP or DP register number into inst.instruction.  */
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
          ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
          ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
        first_error (_("D register out of range for selected VFP version"));

      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
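/* Encoding note with worked examples (added, not in the original source):
   a single-precision register splits as Sd = {bits 15:12 = reg >> 1,
   bit 22 = reg & 1}, so s13 puts 6 in the Vd field and 1 in the D bit.
   A double-precision register splits the other way round: d17 puts
   17 & 15 == 1 in the Vd field and 17 >> 4 == 1 in the D bit, which is
   why D registers above d15 require the VFPv3 feature check above.  */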
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
encode_arm_shift (int i)
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
          inst.instruction |= SHIFT_BY_REG;
          inst.instruction |= inst.operands[i].imm << 8;
        inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;

encode_arm_shifter_operand (int i)
  if (inst.operands[i].isreg)
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    inst.instruction |= INST_IMMEDIATE;
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
          inst.error = _("instruction does not accept preindexed addressing");
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
        inst.instruction |= WRITE_BACK;
  else if (inst.operands[i].postind)
      assert (inst.operands[i].writeback);
        inst.instruction |= WRITE_BACK;
  else /* unindexed - only for coprocessor */
      inst.error = _("instruction does not accept unindexed addressing");

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
          == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
             ? _("destination register same as write-back base")
             : _("source register same as write-back base"));
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
        inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
          if (inst.operands[i].shift_kind == SHIFT_RRX)
            inst.instruction |= SHIFT_ROR << 5;
              inst.instruction |= inst.operands[i].shift_kind << 5;
              inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
  else /* immediate offset in inst.reloc */
      if (inst.reloc.type == BFD_RELOC_UNUSED)
        inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
      inst.error = _("instruction does not accept scaled register index");

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
        inst.instruction |= INDEX_UP;
  else /* immediate offset in inst.reloc */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
        inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
      assert (!inst.operands[i].writeback);
          inst.error = _("instruction does not support unindexed addressing");
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
      if (inst.operands[i].reg == REG_PC)
          inst.error = _("pc may not be used with write-back");
          inst.error = _("instruction does not support writeback");
      inst.instruction |= WRITE_BACK;

    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
            || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
           && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
        inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
        inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
  tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
      inst.error = _("invalid pseudo operation");
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
      inst.error = _("constant expression expected");
  if (inst.reloc.exp.X_op == O_constant)
      if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
          /* This can be done with a mov(1) instruction.  */
          inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
          inst.instruction |= inst.reloc.exp.X_add_number;

          int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
              /* This can be done with a mov instruction.  */
              inst.instruction &= LITERAL_MASK;
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
              inst.instruction |= value & 0xfff;

          value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
              /* This can be done with a mvn instruction.  */
              inst.instruction &= LITERAL_MASK;
              inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
              inst.instruction |= value & 0xfff;

  if (add_to_lit_pool () == FAIL)
      inst.error = _("literal pool insertion failed");
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
                     ? BFD_RELOC_ARM_THUMB_OFFSET
                       ? BFD_RELOC_ARM_HWLITERAL
                       : BFD_RELOC_ARM_LITERAL));
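/* Illustrative behaviour (added comment, not from the original source):
   "ldr r0, =0x000000ff" can be rewritten as "mov r0, #0xff", and
   "ldr r0, =0xffffff00" as "mvn r0, #0xff", so no literal-pool entry is
   needed; a constant such as 0x12345678 fails both encode_arm_immediate
   attempts and falls through to the PC-relative literal-pool load set up
   at the end of the function.  */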
/* Functions for instruction encoding, sorted by subarchitecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */
  inst.instruction |= inst.operands[0].reg << 12;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;

  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
                _("Rn must not overlap other operands"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

  inst.instruction |= inst.operands[0].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);

/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they ...  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;

/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
     add rd, pc, #low(label-.-8)"
     add rd, rd, #high(label-.-8)"  */
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.size = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
6596 if (inst
.operands
[0].present
)
6598 constraint ((inst
.instruction
& 0xf0) != 0x40
6599 && inst
.operands
[0].imm
!= 0xf,
6600 "bad barrier type");
6601 inst
.instruction
|= inst
.operands
[0].imm
;
6604 inst
.instruction
|= 0xf;
6610 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
6611 constraint (msb
> 32, _("bit-field extends past end of register"));
6612 /* The instruction encoding stores the LSB and MSB,
6613 not the LSB and width. */
6614 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6615 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
6616 inst
.instruction
|= (msb
- 1) << 16;
6624 /* #0 in second position is alternative syntax for bfc, which is
6625 the same instruction but with REG_PC in the Rm field. */
6626 if (!inst
.operands
[1].isreg
)
6627 inst
.operands
[1].reg
= REG_PC
;
6629 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
6630 constraint (msb
> 32, _("bit-field extends past end of register"));
6631 /* The instruction encoding stores the LSB and MSB,
6632 not the LSB and width. */
6633 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6634 inst
.instruction
|= inst
.operands
[1].reg
;
6635 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6636 inst
.instruction
|= (msb
- 1) << 16;
6642 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
6643 _("bit-field extends past end of register"));
6644 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6645 inst
.instruction
|= inst
.operands
[1].reg
;
6646 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6647 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6650 /* ARM V5 breakpoint instruction (argument parse)
6651 BKPT <16 bit unsigned immediate>
6652 Instruction is not conditional.
6653 The bit pattern given in insns[] has the COND_ALWAYS condition,
6654 and it is an error if the caller tried to override that. */
6659 /* Top 12 of 16 bits to bits 19:8. */
6660 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6662 /* Bottom 4 of 16 bits to bits 3:0. */
6663 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
6667 encode_branch (int default_reloc
)
6669 if (inst
.operands
[0].hasreloc
)
6671 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6672 _("the only suffix valid here is '(plt)'"));
6673 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6677 inst
.reloc
.type
= default_reloc
;
6679 inst
.reloc
.pc_rel
= 1;
6686 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6687 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6690 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6697 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6699 if (inst
.cond
== COND_ALWAYS
)
6700 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6702 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6706 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6709 /* ARM V5 branch-link-exchange instruction (argument parse)
6710 BLX <target_addr> ie BLX(1)
6711 BLX{<condition>} <Rm> ie BLX(2)
6712 Unfortunately, there are two different opcodes for this mnemonic.
6713 So, the insns[].value is not used, and the code here zaps values
6714 into inst.instruction.
6715 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6720 if (inst
.operands
[0].isreg
)
6722 /* Arg is a register; the opcode provided by insns[] is correct.
6723 It is not illegal to do "blx pc", just useless. */
6724 if (inst
.operands
[0].reg
== REG_PC
)
6725 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6727 inst
.instruction
|= inst
.operands
[0].reg
;
6731 /* Arg is an address; this instruction cannot be executed
6732 conditionally, and the opcode must be adjusted. */
6733 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6734 inst
.instruction
= 0xfa000000;
6736 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6737 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6740 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6747 if (inst
.operands
[0].reg
== REG_PC
)
6748 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6750 inst
.instruction
|= inst
.operands
[0].reg
;
6754 /* ARM v5TEJ. Jump to Jazelle code. */
6759 if (inst
.operands
[0].reg
== REG_PC
)
6760 as_tsktsk (_("use of r15 in bxj is not really useful"));
6762 inst
.instruction
|= inst
.operands
[0].reg
;
6765 /* Co-processor data operation:
6766 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6767 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6771 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6772 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6773 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6774 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6775 inst
.instruction
|= inst
.operands
[4].reg
;
6776 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6782 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6783 encode_arm_shifter_operand (1);
6786 /* Transfer between coprocessor and ARM registers.
6787 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6792 No special properties. */
6797 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6798 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6799 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6800 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6801 inst
.instruction
|= inst
.operands
[4].reg
;
6802 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6805 /* Transfer between coprocessor register and pair of ARM registers.
6806 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6811 Two XScale instructions are special cases of these:
6813 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6814 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6816 Result unpredicatable if Rd or Rn is R15. */
6821 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6822 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6823 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6824 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6825 inst
.instruction
|= inst
.operands
[4].reg
;
6831 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6832 if (inst
.operands
[1].present
)
6834 inst
.instruction
|= CPSI_MMOD
;
6835 inst
.instruction
|= inst
.operands
[1].imm
;
6842 inst
.instruction
|= inst
.operands
[0].imm
;
6848 /* There is no IT instruction in ARM mode. We
6849 process it but do not generate code for it. */
6856 int base_reg
= inst
.operands
[0].reg
;
6857 int range
= inst
.operands
[1].imm
;
6859 inst
.instruction
|= base_reg
<< 16;
6860 inst
.instruction
|= range
;
6862 if (inst
.operands
[1].writeback
)
6863 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6865 if (inst
.operands
[0].writeback
)
6867 inst
.instruction
|= WRITE_BACK
;
6868 /* Check for unpredictable uses of writeback. */
6869 if (inst
.instruction
& LOAD_BIT
)
6871 /* Not allowed in LDM type 2. */
6872 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6873 && ((range
& (1 << REG_PC
)) == 0))
6874 as_warn (_("writeback of base register is UNPREDICTABLE"));
6875 /* Only allowed if base reg not in list for other types. */
6876 else if (range
& (1 << base_reg
))
6877 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6881 /* Not allowed for type 2. */
6882 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
6883 as_warn (_("writeback of base register is UNPREDICTABLE"));
6884 /* Only allowed if base reg not in list, or first in list. */
6885 else if ((range
& (1 << base_reg
))
6886 && (range
& ((1 << base_reg
) - 1)))
6887 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6892 /* ARMv5TE load-consecutive (argument parse)
6901 constraint (inst
.operands
[0].reg
% 2 != 0,
6902 _("first destination register must be even"));
6903 constraint (inst
.operands
[1].present
6904 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6905 _("can only load two consecutive registers"));
6906 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6907 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
6909 if (!inst
.operands
[1].present
)
6910 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
6912 if (inst
.instruction
& LOAD_BIT
)
6914 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6915 register and the first register written; we have to diagnose
6916 overlap between the base and the second register written here. */
6918 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
6919 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
6920 as_warn (_("base register written back, and overlaps "
6921 "second destination register"));
6923 /* For an index-register load, the index register must not overlap the
6924 destination (even if not write-back). */
6925 else if (inst
.operands
[2].immisreg
6926 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
6927 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
6928 as_warn (_("index register overlaps destination register"));
6931 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6932 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
6938 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
6939 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
6940 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
6941 || inst
.operands
[1].negative
6942 /* This can arise if the programmer has written
6944 or if they have mistakenly used a register name as the last
6947 It is very difficult to distinguish between these two cases
6948 because "rX" might actually be a label. ie the register
6949 name has been occluded by a symbol of the same name. So we
6950 just generate a general 'bad addressing mode' type error
6951 message and leave it up to the programmer to discover the
6952 true cause and fix their mistake. */
6953 || (inst
.operands
[1].reg
== REG_PC
),
6956 constraint (inst
.reloc
.exp
.X_op
!= O_constant
6957 || inst
.reloc
.exp
.X_add_number
!= 0,
6958 _("offset must be zero in ARM encoding"));
6960 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6961 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6962 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
6968 constraint (inst
.operands
[0].reg
% 2 != 0,
6969 _("even register required"));
6970 constraint (inst
.operands
[1].present
6971 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
6972 _("can only load two consecutive registers"));
6973 /* If op 1 were present and equal to PC, this function wouldn't
6974 have been called in the first place. */
6975 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
6977 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6978 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6984 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6985 if (!inst
.operands
[1].isreg
)
6986 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
6988 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
6994 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6996 if (inst
.operands
[1].preind
)
6998 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
6999 inst
.reloc
.exp
.X_add_number
!= 0,
7000 _("this instruction requires a post-indexed address"));
7002 inst
.operands
[1].preind
= 0;
7003 inst
.operands
[1].postind
= 1;
7004 inst
.operands
[1].writeback
= 1;
7006 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7007 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
7010 /* Halfword and signed-byte load/store operations. */
7015 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7016 if (!inst
.operands
[1].isreg
)
7017 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
7019 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
7025 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7027 if (inst
.operands
[1].preind
)
7029 constraint (inst
.reloc
.exp
.X_op
!= O_constant
||
7030 inst
.reloc
.exp
.X_add_number
!= 0,
7031 _("this instruction requires a post-indexed address"));
7033 inst
.operands
[1].preind
= 0;
7034 inst
.operands
[1].postind
= 1;
7035 inst
.operands
[1].writeback
= 1;
7037 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7038 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
7041 /* Co-processor register load/store.
7042 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7046 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7047 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7048 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7054 /* This restriction does not apply to mls (nor to mla in v6, but
7055 that's hard to detect at present). */
7056 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7057 && !(inst
.instruction
& 0x00400000))
7058 as_tsktsk (_("rd and rm should be different in mla"));
7060 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7061 inst
.instruction
|= inst
.operands
[1].reg
;
7062 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7063 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7070 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7071 encode_arm_shifter_operand (1);
7074 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7081 top
= (inst
.instruction
& 0x00400000) != 0;
7082 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
7083 _(":lower16: not allowed this instruction"));
7084 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
7085 _(":upper16: not allowed instruction"));
7086 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7087 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7089 imm
= inst
.reloc
.exp
.X_add_number
;
7090 /* The value is in two pieces: 0:11, 16:19. */
7091 inst
.instruction
|= (imm
& 0x00000fff);
7092 inst
.instruction
|= (imm
& 0x0000f000) << 4;
7096 static void do_vfp_nsyn_opcode (const char *);
7099 do_vfp_nsyn_mrs (void)
7101 if (inst
.operands
[0].isvec
)
7103 if (inst
.operands
[1].reg
!= 1)
7104 first_error (_("operand 1 must be FPSCR"));
7105 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7106 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7107 do_vfp_nsyn_opcode ("fmstat");
7109 else if (inst
.operands
[1].isvec
)
7110 do_vfp_nsyn_opcode ("fmrx");
7118 do_vfp_nsyn_msr (void)
7120 if (inst
.operands
[0].isvec
)
7121 do_vfp_nsyn_opcode ("fmxr");
7131 if (do_vfp_nsyn_mrs () == SUCCESS
)
7134 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7135 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7137 _("'CPSR' or 'SPSR' expected"));
7138 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7139 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7142 /* Two possible forms:
7143 "{C|S}PSR_<field>, Rm",
7144 "{C|S}PSR_f, #expression". */
7149 if (do_vfp_nsyn_msr () == SUCCESS
)
7152 inst
.instruction
|= inst
.operands
[0].imm
;
7153 if (inst
.operands
[1].isreg
)
7154 inst
.instruction
|= inst
.operands
[1].reg
;
7157 inst
.instruction
|= INST_IMMEDIATE
;
7158 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7159 inst
.reloc
.pc_rel
= 0;
7166 if (!inst
.operands
[2].present
)
7167 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7168 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7169 inst
.instruction
|= inst
.operands
[1].reg
;
7170 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7172 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7173 as_tsktsk (_("rd and rm should be different in mul"));
7176 /* Long Multiply Parser
7177 UMULL RdLo, RdHi, Rm, Rs
7178 SMULL RdLo, RdHi, Rm, Rs
7179 UMLAL RdLo, RdHi, Rm, Rs
7180 SMLAL RdLo, RdHi, Rm, Rs. */
7185 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7186 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7187 inst
.instruction
|= inst
.operands
[2].reg
;
7188 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7190 /* rdhi, rdlo and rm must all be different. */
7191 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7192 || inst
.operands
[0].reg
== inst
.operands
[2].reg
7193 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
7194 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7200 if (inst
.operands
[0].present
)
7202 /* Architectural NOP hints are CPSR sets with no bits selected. */
7203 inst
.instruction
&= 0xf0000000;
7204 inst
.instruction
|= 0x0320f000 + inst
.operands
[0].imm
;
7208 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7209 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7210 Condition defaults to COND_ALWAYS.
7211 Error if Rd, Rn or Rm are R15. */
7216 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7217 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7218 inst
.instruction
|= inst
.operands
[2].reg
;
7219 if (inst
.operands
[3].present
)
7220 encode_arm_shift (3);
7223 /* ARM V6 PKHTB (Argument Parse). */
7228 if (!inst
.operands
[3].present
)
7230 /* If the shift specifier is omitted, turn the instruction
7231 into pkhbt rd, rm, rn. */
7232 inst
.instruction
&= 0xfff00010;
7233 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7234 inst
.instruction
|= inst
.operands
[1].reg
;
7235 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7239 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7240 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7241 inst
.instruction
|= inst
.operands
[2].reg
;
7242 encode_arm_shift (3);
7246 /* ARMv5TE: Preload-Cache
7250 Syntactically, like LDR with B=1, W=0, L=1. */
7255 constraint (!inst
.operands
[0].isreg
,
7256 _("'[' expected after PLD mnemonic"));
7257 constraint (inst
.operands
[0].postind
,
7258 _("post-indexed expression used in preload instruction"));
7259 constraint (inst
.operands
[0].writeback
,
7260 _("writeback used in preload instruction"));
7261 constraint (!inst
.operands
[0].preind
,
7262 _("unindexed addressing used in preload instruction"));
7263 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7266 /* ARMv7: PLI <addr_mode> */
7270 constraint (!inst
.operands
[0].isreg
,
7271 _("'[' expected after PLI mnemonic"));
7272 constraint (inst
.operands
[0].postind
,
7273 _("post-indexed expression used in preload instruction"));
7274 constraint (inst
.operands
[0].writeback
,
7275 _("writeback used in preload instruction"));
7276 constraint (!inst
.operands
[0].preind
,
7277 _("unindexed addressing used in preload instruction"));
7278 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7279 inst
.instruction
&= ~PRE_INDEX
;
7285 inst
.operands
[1] = inst
.operands
[0];
7286 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
7287 inst
.operands
[0].isreg
= 1;
7288 inst
.operands
[0].writeback
= 1;
7289 inst
.operands
[0].reg
= REG_SP
;
7293 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7294 word at the specified address and the following word
7296 Unconditionally executed.
7297 Error if Rn is R15. */
7302 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7303 if (inst
.operands
[0].writeback
)
7304 inst
.instruction
|= WRITE_BACK
;
7307 /* ARM V6 ssat (argument parse). */
7312 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7313 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
7314 inst
.instruction
|= inst
.operands
[2].reg
;
7316 if (inst
.operands
[3].present
)
7317 encode_arm_shift (3);
7320 /* ARM V6 usat (argument parse). */
7325 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7326 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7327 inst
.instruction
|= inst
.operands
[2].reg
;
7329 if (inst
.operands
[3].present
)
7330 encode_arm_shift (3);
7333 /* ARM V6 ssat16 (argument parse). */
7338 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7339 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
7340 inst
.instruction
|= inst
.operands
[2].reg
;
7346 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7347 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7348 inst
.instruction
|= inst
.operands
[2].reg
;
7351 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7352 preserving the other bits.
7354 setend <endian_specifier>, where <endian_specifier> is either
7360 if (inst
.operands
[0].imm
)
7361 inst
.instruction
|= 0x200;
7367 unsigned int Rm
= (inst
.operands
[1].present
7368 ? inst
.operands
[1].reg
7369 : inst
.operands
[0].reg
);
7371 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7372 inst
.instruction
|= Rm
;
7373 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
7375 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7376 inst
.instruction
|= SHIFT_BY_REG
;
7379 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7385 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
7386 inst
.reloc
.pc_rel
= 0;
7392 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
7393 inst
.reloc
.pc_rel
= 0;
7396 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7397 SMLAxy{cond} Rd,Rm,Rs,Rn
7398 SMLAWy{cond} Rd,Rm,Rs,Rn
7399 Error if any register is R15. */
7404 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7405 inst
.instruction
|= inst
.operands
[1].reg
;
7406 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7407 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7410 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7411 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7412 Error if any register is R15.
7413 Warning if Rdlo == Rdhi. */
7418 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7419 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7420 inst
.instruction
|= inst
.operands
[2].reg
;
7421 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7423 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7424 as_tsktsk (_("rdhi and rdlo must be different"));
7427 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7428 SMULxy{cond} Rd,Rm,Rs
7429 Error if any register is R15. */
7434 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7435 inst
.instruction
|= inst
.operands
[1].reg
;
7436 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7439 /* ARM V6 srs (argument parse). */
7444 inst
.instruction
|= inst
.operands
[0].imm
;
7445 if (inst
.operands
[0].writeback
)
7446 inst
.instruction
|= WRITE_BACK
;
7449 /* ARM V6 strex (argument parse). */
7454 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
7455 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
7456 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
7457 || inst
.operands
[2].negative
7458 /* See comment in do_ldrex(). */
7459 || (inst
.operands
[2].reg
== REG_PC
),
7462 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7463 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
7465 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7466 || inst
.reloc
.exp
.X_add_number
!= 0,
7467 _("offset must be zero in ARM encoding"));
7469 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7470 inst
.instruction
|= inst
.operands
[1].reg
;
7471 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7472 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7478 constraint (inst
.operands
[1].reg
% 2 != 0,
7479 _("even register required"));
7480 constraint (inst
.operands
[2].present
7481 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
7482 _("can only store two consecutive registers"));
7483 /* If op 2 were present and equal to PC, this function wouldn't
7484 have been called in the first place. */
7485 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
7487 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7488 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
7489 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
7492 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7493 inst
.instruction
|= inst
.operands
[1].reg
;
7494 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7497 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7498 extends it to 32-bits, and adds the result to a value in another
7499 register. You can specify a rotation by 0, 8, 16, or 24 bits
7500 before extracting the 16-bit value.
7501 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7502 Condition defaults to COND_ALWAYS.
7503 Error if any register uses R15. */
7508 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7509 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7510 inst
.instruction
|= inst
.operands
[2].reg
;
7511 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
7516 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7517 Condition defaults to COND_ALWAYS.
7518 Error if any register uses R15. */
7523 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7524 inst
.instruction
|= inst
.operands
[1].reg
;
7525 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

static void
do_vfp_sp_monadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dyadic (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_compare_z (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}

static void
do_vfp_dp_sp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_dp_cvt (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_reg_from_sp (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}

static void
do_vfp_reg2_from_sp2 (void)
{
  constraint (inst.operands[2].imm != 2,
              _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}

static void
do_vfp_sp_from_reg (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_vfp_sp2_from_reg2 (void)
{
  constraint (inst.operands[0].imm != 2,
              _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_vfp_sp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
do_vfp_dp_ldst (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}

static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA,
                _("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}

static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
                _("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}

static void
do_vfp_sp_ldstmia (void)
{
  vfp_sp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_sp_ldstmdb (void)
{
  vfp_sp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_dp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIA);
}

static void
do_vfp_dp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDB);
}

static void
do_vfp_xp_ldstmia (void)
{
  vfp_dp_ldstm (VFP_LDSTMIAX);
}

static void
do_vfp_xp_ldstmdb (void)
{
  vfp_dp_ldstm (VFP_LDSTMDBX);
}

static void
do_vfp_dp_rd_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rn_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}

static void
do_vfp_dp_rd_rn_rm (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}

static void
do_vfp_dp_rd (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}

static void
do_vfp_dp_rm_rd_rn (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
/* VFPv3 instructions.  */

static void
do_vfp_sp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
do_vfp_dp_const (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}

static void
vfp_conv (int srcsize)
{
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}

static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}

static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}

static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}

static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
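/* Worked example (illustrative, not from the original source): for a
   fixed-point conversion with 12 fraction bits from a 32-bit source,
   vfp_conv (32) computes immbits = 32 - 12 = 20, so bit 5 receives
   20 & 1 = 0 and the four-bit field above it receives 20 >> 1 = 10.  */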
/* FPA instructions.  Also in a logical order.  */

static void
do_fpa_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;          break;
    case 2: inst.instruction |= CP_T_Y;          break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:                                      break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
         [Rn]{!}.  The instruction does not really support stacking or
         unstacking, so we have to emulate these by setting appropriate
         bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
                  || inst.reloc.exp.X_add_number != 0,
                  _("this instruction does not support indexing"));

      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
        inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
        inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
        {
          inst.operands[2].preind = 0;
          inst.operands[2].postind = 1;
        }
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
/* iWMMXt instructions: strictly in alphabetical order.  */

static void
do_iwmmxt_tandorc (void)
{
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}

static void
do_iwmmxt_textrc (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}

static void
do_iwmmxt_textrm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tinsr (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}

static void
do_iwmmxt_tmia (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

static void
do_iwmmxt_waligni (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}

static void
do_iwmmxt_wmerge (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}

static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_iwmmxt_wldstbh (void)
{
  int reloc;
  inst.instruction |= inst.operands[0].reg << 12;
  if (thumb_mode)
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
  else
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
}

static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
        inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
        inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
        inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}

static void
do_iwmmxt_wshufh (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}

static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
                  _("immediate operand requires iWMMXt2"));
      do_rd_rn ();
      if (inst.operands[2].imm == 0)
        {
          switch ((inst.instruction >> 20) & 0xf)
            {
            case 4:
            case 5:
            case 6:
            case 7:
              /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
              inst.operands[2].imm = 16;
              inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
              break;
            case 8:
            case 9:
            case 10:
            case 11:
              /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
              inst.operands[2].imm = 32;
              inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
              break;
            case 12:
            case 13:
            case 14:
            case 15:
              {
                /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
                unsigned long wrn;
                wrn = (inst.instruction >> 16) & 0xf;
                inst.instruction &= 0xff0fff0f;
                inst.instruction |= wrn;
                /* Bail out here; the instruction is now assembled.  */
                return;
              }
            }
        }
      /* Map 32 -> 0, etc.  */
      inst.operands[2].imm &= 0x1f;
      inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
    }
}
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

static void
do_mav_triple (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Insns like "foo W,X,Y,Z".
    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

static void
do_mav_quad (void)
{
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
}

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
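/* Worked example (illustrative, not from the original source): a shift
   amount of 0x45 (binary 100 0101) is split by the statement above into
   (0x45 & 0xf) | ((0x45 & 0x70) << 1) = 0x05 | 0x80 = 0x85, i.e.
   bits 0-3 = 0101 and bits 5-7 = 100, with bit 4 left clear.  */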
/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
     MIAxycc acc0,Rm,Rs.  */

static void
do_xsc_mia (void)
{
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}

/* Xscale move-accumulator-register (argument parse)

     MARcc   acc0,RdLo,RdHi.  */

static void
do_xsc_mar (void)
{
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

/* Xscale move-register-accumulator (argument parse)

     MRAcc   RdLo,RdHi,acc0.  */

static void
do_xsc_mra (void)
{
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
              _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));

      constraint (value > 32
                  || (value == 32 && (shift == SHIFT_LSL
                                      || shift == SHIFT_ROR)),
                  _("shift expression is too large"));

      if (value == 0)
        shift = SHIFT_LSL;
      else if (value == 32)
        value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
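/* Worked example (illustrative, not from the original source): for an
   operand such as "r1, lsl #18" the constant 18 is split across the
   Thumb-2 imm3:imm2 fields: (18 & 0x1c) << 10 puts 4 in bits 12-14 and
   (18 & 0x03) << 6 puts 2 in bits 6-7, while the shift type occupies
   bits 4-5.  */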
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  */

static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
              _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
                  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
                  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
                  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
                  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
        {
          constraint (inst.reloc.exp.X_op != O_constant,
                      _("expression too complex"));
          constraint (inst.reloc.exp.X_add_number < 0
                      || inst.reloc.exp.X_add_number > 3,
                      _("shift out of range"));
          inst.instruction |= inst.reloc.exp.X_add_number << 4;
        }
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      constraint (is_pc && inst.operands[i].writeback,
                  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
                  _("cannot use writeback with this instruction"));

      if (is_d)
        {
          inst.instruction |= 0x01000000;
          if (inst.operands[i].writeback)
            inst.instruction |= 0x00200000;
        }
      else
        {
          inst.instruction |= 0x00000c00;
          if (inst.operands[i].writeback)
            inst.instruction |= 0x00000100;
        }
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
        inst.instruction |= 0x00200000;
      else
        inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1bd0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af9004), /* typo, 8004? */
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
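/* Illustrative expansion (added for clarity, not part of the original
   source): with the first X() definition above, T16_32_TAB expands to
   the enumerators T_MNEM_adc, T_MNEM_adcs, ... numbered from
   T16_32_OFFSET + 1, and with the later definitions
   THUMB_OP16 (T_MNEM_adc) yields 0x4140 while THUMB_OP32 (T_MNEM_adc)
   yields 0xeb400000.  */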
/* Thumb instruction encoders, in alphabetical order.  */

static void
do_t_add_sub_w (void)
{
  int Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  constraint (Rd == 15, _("PC not allowed as destination"));
  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
}
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      flags = (inst.instruction == T_MNEM_adds
               || inst.instruction == T_MNEM_subs);
      if (flags)
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
        {
          int add;

          add = (inst.instruction == T_MNEM_add
                 || inst.instruction == T_MNEM_adds);
          opcode = 0;
          if (inst.size_req != 4)
            {
              /* Attempt to use a narrow opcode, with relaxation if
                 appropriate.  */
              if (Rd == REG_SP && Rs == REG_SP && !flags)
                opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
              else if (Rd <= 7 && Rs == REG_SP && add && !flags)
                opcode = T_MNEM_add_sp;
              else if (Rd <= 7 && Rs == REG_PC && add && !flags)
                opcode = T_MNEM_add_pc;
              else if (Rd <= 7 && Rs <= 7 && narrow)
                {
                  if (flags)
                    opcode = add ? T_MNEM_addis : T_MNEM_subis;
                  else
                    opcode = add ? T_MNEM_addi : T_MNEM_subi;
                }
              if (opcode)
                {
                  inst.instruction = THUMB_OP16(opcode);
                  inst.instruction |= (Rd << 4) | Rs;
                  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
                  if (inst.size_req != 2)
                    inst.relax = opcode;
                }
              else
                constraint (inst.size_req == 2, BAD_HIREG);
            }
          if (inst.size_req == 4
              || (inst.size_req != 2 && !opcode))
            {
              if (Rs == REG_PC)
                {
                  /* Always use addw/subw.  */
                  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
                  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
                }
              else
                {
                  inst.instruction = THUMB_OP32 (inst.instruction);
                  inst.instruction = (inst.instruction & 0xe1ffffff)
                                     | 0x10000000;
                  if (flags)
                    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
                  else
                    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
                }
              inst.instruction |= Rd << 8;
              inst.instruction |= Rs << 16;
            }
        }
      else
        {
          Rn = inst.operands[2].reg;
          /* See if we can do this with a 16-bit instruction.  */
          if (!inst.operands[2].shifted && inst.size_req != 4)
            {
              if (Rd > 7 || Rs > 7 || Rn > 7)
                narrow = FALSE;

              if (narrow)
                {
                  inst.instruction = ((inst.instruction == T_MNEM_adds
                                       || inst.instruction == T_MNEM_add)
                                      ? T_OPCODE_ADD_R3
                                      : T_OPCODE_SUB_R3);
                  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
                  return;
                }

              if (inst.instruction == T_MNEM_add)
                {
                  if (Rd == Rs)
                    {
                      inst.instruction = T_OPCODE_ADD_HI;
                      inst.instruction |= (Rd & 8) << 4;
                      inst.instruction |= (Rd & 7);
                      inst.instruction |= Rn << 3;
                      return;
                    }
                  /* ... because addition is commutative! */
                  else if (Rd == Rn)
                    {
                      inst.instruction = T_OPCODE_ADD_HI;
                      inst.instruction |= (Rd & 8) << 4;
                      inst.instruction |= (Rd & 7);
                      inst.instruction |= Rs << 3;
                      return;
                    }
                }
            }
          /* If we get here, it can't be done in 16 bits.  */
          constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
                      _("shift must be constant"));
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          encode_thumb32_shifted_operand (2);
        }
    }
  else
    {
      constraint (inst.instruction == T_MNEM_adds
                  || inst.instruction == T_MNEM_subs,
                  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
        {
          constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
                      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
                      BAD_HIREG);

          inst.instruction = (inst.instruction == T_MNEM_add
                              ? 0x0000 : 0x8000);
          inst.instruction |= (Rd << 4) | Rs;
          inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
          return;
        }

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
        {
          /* Can't do this for SUB.  */
          constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
          inst.instruction = T_OPCODE_ADD_HI;
          inst.instruction |= (Rd & 8) << 4;
          inst.instruction |= (Rd & 7);
          if (Rs == Rd)
            inst.instruction |= Rn << 3;
          else if (Rn == Rd)
            inst.instruction |= Rs << 3;
          else
            constraint (1, _("dest must overlap one source register"));
        }
      else
        {
          inst.instruction = (inst.instruction == T_MNEM_add
                              ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
          inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
        }
    }
}
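/* Example of the selection above (illustrative, not from the source):
   in unified syntax "add r1, sp, #8" with no width qualifier matches
   the Rd <= 7 && Rs == REG_SP case and is emitted through the 16-bit
   T_MNEM_add_sp form (with relaxation left enabled), whereas
   "add.w r1, sp, #8" forces inst.size_req to 4 and takes the 32-bit
   THUMB_OP32 path instead.  */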
static void
do_t_adr (void)
{
  if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= inst.operands[0].reg << 4;
    }
}
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          bfd_boolean narrow;

          /* See if we can do this with a 16-bit instruction.  */
          if (THUMB_SETS_FLAGS (inst.instruction))
            narrow = current_it_mask == 0;
          else
            narrow = current_it_mask != 0;

          if (Rd > 7 || Rn > 7 || Rs > 7)
            narrow = FALSE;
          if (inst.operands[2].shifted)
            narrow = FALSE;
          if (inst.size_req == 4)
            narrow = FALSE;

          if (narrow && Rd == Rs)
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= Rd;
              inst.instruction |= Rn << 3;
              return;
            }

          /* If we get here, it can't be done in 16 bits.  */
          constraint (inst.operands[2].shifted
                      && inst.operands[2].immisreg,
                      _("shift must be constant"));
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          encode_thumb32_shifted_operand (2);
        }
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
         flags.  However, the only supported mnemonic in this mode
         says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
                  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
                  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
   accepted.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          bfd_boolean narrow;

          /* See if we can do this with a 16-bit instruction.  */
          if (THUMB_SETS_FLAGS (inst.instruction))
            narrow = current_it_mask == 0;
          else
            narrow = current_it_mask != 0;

          if (Rd > 7 || Rn > 7 || Rs > 7)
            narrow = FALSE;
          if (inst.operands[2].shifted)
            narrow = FALSE;
          if (inst.size_req == 4)
            narrow = FALSE;

          if (narrow)
            {
              if (Rd == Rs)
                {
                  inst.instruction = THUMB_OP16 (inst.instruction);
                  inst.instruction |= Rd;
                  inst.instruction |= Rn << 3;
                  return;
                }
              if (Rd == Rn)
                {
                  inst.instruction = THUMB_OP16 (inst.instruction);
                  inst.instruction |= Rd;
                  inst.instruction |= Rs << 3;
                  return;
                }
            }

          /* If we get here, it can't be done in 16 bits.  */
          constraint (inst.operands[2].shifted
                      && inst.operands[2].immisreg,
                      _("shift must be constant"));
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= Rd << 8;
          inst.instruction |= Rs << 16;
          encode_thumb32_shifted_operand (2);
        }
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
         flags.  However, the only supported mnemonic in this mode
         says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
                  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
        inst.instruction |= Rn << 3;
      else if (Rd == Rn)
        inst.instruction |= Rs << 3;
      else
        constraint (1, _("dest must overlap one source register"));
    }
}
static void
do_t_barrier (void)
{
  if (inst.operands[0].present)
    {
      constraint ((inst.instruction & 0xf0) != 0x40
                  && inst.operands[0].imm != 0xf,
                  "bad barrier type");
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
}
static void
do_t_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

static void
do_t_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
}

static void
do_t_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
              _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
}
/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

static void
do_t_blx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
    /* We have a register, so this is BLX(2).  */
    inst.instruction |= inst.operands[0].reg << 3;
  else
    {
      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
    }
}
static void
do_t_branch (void)
{
  int opcode;
  int cond;

  if (current_it_mask)
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
         branches.  */
      cond = COND_ALWAYS;
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
        {
          assert (cond != 0xF);
          inst.instruction |= cond << 22;
          inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
        }
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
        inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
        {
          inst.instruction |= cond << 8;
          inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
        }
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
        inst.relax = opcode;
    }

  inst.reloc.pc_rel = 1;
}
static void
do_t_bkpt (void)
{
  constraint (inst.cond != COND_ALWAYS,
              _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > 255,
                  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }
}
static void
do_t_branch23 (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type   = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (	 inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);
}
static void
do_t_bx (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
}

static void
do_t_bxj (void)
{
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg << 16;
}
static void
do_t_clz (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_t_cps (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;
}
static void
do_t_cpsi (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
        inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
                  && (inst.operands[0].imm & 4),
                  _("selected processor does not support 'A' form "
                    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
                  _("Thumb does not support the 2-argument "
                    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
/* THUMB CPY instruction (argument parse).  */

static void
do_t_cpy (void)
{
  if (inst.size_req == 4)
    {
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    {
      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
static void
do_t_cbz (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
}
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
static void
do_t_div (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}
static void
do_t_hint (void)
{
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
  else
    inst.instruction = THUMB_OP16 (inst.instruction);
}
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
  current_it_mask = (inst.instruction & 0xf) | 0x10;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
        /* no conversion needed */;
      else if ((mask & 0x3) == 0)
        mask ^= 0x8;
      else if ((mask & 0x1) == 0)
        mask ^= 0xC;
      else
        mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
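/* Worked example of the inversion above (illustrative): a mask whose
   low nibble is 0x4 (binary 0100) fails the (mask & 0x7) == 0 test but
   satisfies (mask & 0x3) == 0, so it is transformed by mask ^= 0x8 into
   0xC; a mask of 0x2 would instead take the (mask & 0x1) == 0 branch
   and become 0xE via mask ^= 0xC.  */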
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));
  constraint (inst.operands[1].writeback,
              _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
          && inst.size_req != 4
          && inst.operands[0].reg <= 7
          && !(inst.operands[1].imm & ~0xff)
          && (inst.instruction == T_MNEM_stmia
              ? inst.operands[0].writeback
              : (inst.operands[0].writeback
                 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
        {
          if (inst.instruction == T_MNEM_stmia
              && (inst.operands[1].imm & (1 << inst.operands[0].reg))
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
            as_warn (_("value stored for r%d is UNPREDICTABLE"),
                     inst.operands[0].reg);

          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 8;
          inst.instruction |= inst.operands[1].imm;
        }
      else
        {
          if (inst.operands[1].imm & (1 << 13))
            as_warn (_("SP should not be in register list"));
          if (inst.instruction == T_MNEM_stmia)
            {
              if (inst.operands[1].imm & (1 << 15))
                as_warn (_("PC should not be in register list"));
              if (inst.operands[1].imm & (1 << inst.operands[0].reg))
                as_warn (_("value stored for r%d is UNPREDICTABLE"),
                         inst.operands[0].reg);
            }
          else
            {
              if (inst.operands[1].imm & (1 << 14)
                  && inst.operands[1].imm & (1 << 15))
                as_warn (_("LR and PC should not both be in register list"));
              if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
                  && inst.operands[0].writeback)
                as_warn (_("base register should not be in register list "
                           "when written back"));
            }
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 16;
          inst.instruction |= inst.operands[1].imm;
          if (inst.operands[0].writeback)
            inst.instruction |= WRITE_BACK;
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7
                  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      if (inst.instruction == T_MNEM_stmia)
        {
          if (!inst.operands[0].writeback)
            as_warn (_("this instruction will write back the base register"));
          if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
              && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
            as_warn (_("value stored for r%d is UNPREDICTABLE"),
                     inst.operands[0].reg);
        }
      else
        {
          if (!inst.operands[0].writeback
              && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
            as_warn (_("this instruction will write back the base register"));
          else if (inst.operands[0].writeback
                   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
            as_warn (_("this instruction will not write back the base register"));
        }

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
              || inst.operands[1].postind || inst.operands[1].writeback
              || inst.operands[1].immisreg || inst.operands[1].shifted
              || inst.operands[1].negative,
              BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
static void
do_t_ldrexd (void)
{
  if (!inst.operands[1].present)
    {
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed as first register "
                    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;
    }
  constraint (inst.operands[0].reg == inst.operands[1].reg,
              BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
}
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
        {
          if (opcode <= 0xffff)
            inst.instruction = THUMB_OP32 (opcode);
          if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
            return;
        }
      if (inst.operands[1].isreg
          && !inst.operands[1].writeback
          && !inst.operands[1].shifted && !inst.operands[1].postind
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
          && opcode <= 0xffff
          && inst.size_req != 4)
        {
          /* Insn may have a 16-bit form.  */
          Rn = inst.operands[1].reg;
          if (inst.operands[1].immisreg)
            {
              inst.instruction = THUMB_OP16 (opcode);
              /* [Rn, Ri] */
              if (Rn <= 7 && inst.operands[1].imm <= 7)
                goto op16;
            }
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
                    && opcode != T_MNEM_ldrsb)
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
                   || (Rn == REG_SP && opcode == T_MNEM_str))
            {
              /* [Rn, #const] */
              if (Rn > 7)
                {
                  if (Rn == REG_PC)
                    {
                      if (inst.reloc.pc_rel)
                        opcode = T_MNEM_ldr_pc2;
                      else
                        opcode = T_MNEM_ldr_pc;
                    }
                  else
                    {
                      if (opcode == T_MNEM_ldr)
                        opcode = T_MNEM_ldr_sp;
                      else
                        opcode = T_MNEM_str_sp;
                    }
                  inst.instruction = inst.operands[0].reg << 8;
                }
              else
                {
                  inst.instruction = inst.operands[0].reg;
                  inst.instruction |= inst.operands[1].reg << 3;
                }
              inst.instruction |= THUMB_OP16 (opcode);
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
              else
                inst.relax = opcode;
              return;
            }
        }
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
                  || inst.operands[1].postind || inst.operands[1].shifted
                  || inst.operands[1].negative,
                  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
              || inst.operands[1].shifted
              || inst.operands[1].writeback,
              _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
                  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
                  && !(inst.instruction & THUMB_LOAD_BIT),
                  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
                  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
        inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
        inst.instruction = T_OPCODE_LDR_SP;
      else
        inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
              _("Thumb does not support this addressing mode"));

 op16:
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed here"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}
static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
static void
do_t_mla (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 12;
}
static void
do_t_mlal (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
static void
do_t_mov_cmp (void)
{
  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
      opcode = inst.instruction;
      if (current_it_mask)
        narrow = opcode != T_MNEM_movs;
      else
        narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
          || inst.operands[1].shifted)
        narrow = FALSE;

      if (!inst.operands[1].isreg)
        {
          /* Immediate operand.  */
          if (current_it_mask == 0 && opcode == T_MNEM_mov)
            narrow = FALSE;
          if (low_regs && narrow)
            {
              inst.instruction = THUMB_OP16 (opcode);
              inst.instruction |= inst.operands[0].reg << 8;
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
              else
                inst.relax = opcode;
            }
          else
            {
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
              inst.instruction |= inst.operands[0].reg << r0off;
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
            }
        }
      else if (!narrow)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << r0off;
          encode_thumb32_shifted_operand (1);
        }
      else
        switch (inst.instruction)
          {
          case T_MNEM_mov:
            inst.instruction = T_OPCODE_MOV_HR;
            inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
            inst.instruction |= (inst.operands[0].reg & 0x7);
            inst.instruction |= inst.operands[1].reg << 3;
            break;

          case T_MNEM_movs:
            /* We know we have low registers at this point.
               Generate ADD Rd, Rs, #0.  */
            inst.instruction = T_OPCODE_ADD_I3;
            inst.instruction |= inst.operands[0].reg;
            inst.instruction |= inst.operands[1].reg << 3;
            break;

          case T_MNEM_cmp:
            if (low_regs)
              {
                inst.instruction = T_OPCODE_CMP_LR;
                inst.instruction |= inst.operands[0].reg;
                inst.instruction |= inst.operands[1].reg << 3;
              }
            else
              {
                inst.instruction = T_OPCODE_CMP_HR;
                inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
                inst.instruction |= (inst.operands[0].reg & 0x7);
                inst.instruction |= inst.operands[1].reg << 3;
              }
            break;
          }
      return;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (inst.operands[1].isreg)
    {
      if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
        {
          /* A move of two lowregs is encoded as ADD Rd, Rs, #0
             since a MOV instruction produces unpredictable results.  */
          if (inst.instruction == T_OPCODE_MOV_I8)
            inst.instruction = T_OPCODE_ADD_I3;
          else
            inst.instruction = T_OPCODE_CMP_LR;

          inst.instruction |= inst.operands[0].reg;
          inst.instruction |= inst.operands[1].reg << 3;
        }
      else
        {
          if (inst.instruction == T_OPCODE_MOV_I8)
            inst.instruction = T_OPCODE_MOV_HR;
          else
            inst.instruction = T_OPCODE_CMP_HR;
          do_t_cpy ();
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7,
                  _("only lo regs allowed with immediate"));
      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
static void
do_t_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  inst.instruction |= inst.operands[0].reg << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
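/* Worked example (added for clarity, not part of the original): a
   constant of 0x1234 is split by the statements above as imm4 = 0x1
   into bits 16-19, i = 0 into bit 26, imm3 = 0x2 into bits 12-14 and
   imm8 = 0x34 into bits 0-7, matching the Thumb-2 MOVW/MOVT
   i:imm4:imm3:imm8 layout.  */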
static void
do_t_mvn_tst (void)
{
  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
          || inst.instruction > 0xffff
          || inst.operands[1].shifted
          || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
        narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
        narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= inst.operands[0].reg << r0off;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          /* See if we can do this with a 16-bit instruction.  */
          if (narrow)
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= inst.operands[0].reg;
              inst.instruction |= inst.operands[1].reg << 3;
            }
          else
            {
              constraint (inst.operands[1].shifted
                          && inst.operands[1].immisreg,
                          _("shift must be constant"));
              if (inst.instruction < 0xffff)
                inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= inst.operands[0].reg << r0off;
              encode_thumb32_shifted_operand (1);
            }
        }
    }
  else
    {
      constraint (inst.instruction > 0xffff
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
                  _("unshifted register required"));
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
static void
do_t_mrs (void)
{
  int flags;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register %x"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
                  _("'CPSR' or 'SPSR' expected"));
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}
static void
do_t_msr (void)
{
  int flags;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
              _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= inst.operands[1].reg << 16;
}
static void
do_t_mul (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  /* There is no 32-bit MULS and no 16-bit MUL. */
  if (unified_syntax && inst.instruction == T_MNEM_mul)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg << 0;
    }
  else
    {
      constraint (!unified_syntax
                  && inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;

      if (inst.operands[0].reg == inst.operands[1].reg)
        inst.instruction |= inst.operands[2].reg << 3;
      else if (inst.operands[0].reg == inst.operands[2].reg)
        inst.instruction |= inst.operands[1].reg << 3;
      else
        constraint (1, _("dest must overlap one source register"));
    }
}
static void
do_t_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;

  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
static void
do_t_nop (void)
{
  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].imm;
        }
      else
        {
          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].imm << 4;
        }
    }
  else
    {
      constraint (inst.operands[0].present,
                  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = (current_it_mask == 0);
      else
        narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
        narrow = FALSE;
      if (inst.size_req == 4)
        narrow = FALSE;

      if (!narrow)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 8;
          inst.instruction |= inst.operands[1].reg << 16;
        }
      else
        {
          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].reg;
          inst.instruction |= inst.operands[1].reg << 3;
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
static void
do_t_pkhbt (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}

static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    inst.instruction &= ~0x00000020;
  do_t_pkhbt ();
}
static void
do_t_pld (void)
{
  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
              _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction);
  else if ((inst.instruction == T_MNEM_push
            && (mask & ~0xff) == 1 << REG_LR)
           || (inst.instruction == T_MNEM_pop
               && (mask & ~0xff) == 1 << REG_PC))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
    }
  else if (unified_syntax)
    {
      if (mask & (1 << 13))
        inst.error = _("SP not allowed in register list");
      if (inst.instruction == T_MNEM_push)
        {
          if (mask & (1 << 15))
            inst.error = _("PC not allowed in register list");
        }
      else
        {
          if (mask & (1 << 14)
              && mask & (1 << 15))
            inst.error = _("LR and PC should not both be in register list");
        }

      if ((mask & (mask - 1)) == 0)
        {
          /* Single register push/pop implemented as str/ldr.  */
          if (inst.instruction == T_MNEM_push)
            inst.instruction = 0xf84d0d04;  /* str reg, [sp, #-4]!  */
          else
            inst.instruction = 0xf85d0b04;  /* ldr reg, [sp], #4  */
          mask = ffs (mask) - 1;
        }
      else
        inst.instruction = THUMB_OP32 (inst.instruction);
    }
  else
    inst.error = _("invalid register list to push/pop instruction");

  inst.instruction |= mask;
}
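/* Worked example (illustrative only): a single high register such as
   "push {r8}" cannot use the 16-bit forms above, so the
   (mask & (mask - 1)) == 0 branch rewrites it as the equivalent
   "str r8, [sp, #-4]!" (base pattern 0xf84d0d04) and reduces mask to the
   register number via ffs (mask) - 1 before the final OR; "pop {r8}"
   is likewise turned into "ldr r8, [sp], #4".  */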
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[1].reg;
    }
  else
    inst.error = BAD_HIREG;
  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
        ? inst.operands[1].reg    /* Rd, Rs, foo */
        : inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    encode_thumb32_shifted_operand (2);

  constraint (current_it_mask, BAD_NOT_IT);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
9899 if (!inst
.operands
[1].present
)
9900 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
9907 switch (inst
.instruction
)
9910 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
9912 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
9914 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
9916 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
9920 if (THUMB_SETS_FLAGS (inst
.instruction
))
9921 narrow
= (current_it_mask
== 0);
9923 narrow
= (current_it_mask
!= 0);
9924 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
9926 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
9928 if (inst
.operands
[2].isreg
9929 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
9930 || inst
.operands
[2].reg
> 7))
9932 if (inst
.size_req
== 4)
9937 if (inst
.operands
[2].isreg
)
9939 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9940 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9941 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9942 inst
.instruction
|= inst
.operands
[2].reg
;
9946 inst
.operands
[1].shifted
= 1;
9947 inst
.operands
[1].shift_kind
= shift_kind
;
9948 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
9949 ? T_MNEM_movs
: T_MNEM_mov
);
9950 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9951 encode_thumb32_shifted_operand (1);
9952 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9953 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9958 if (inst
.operands
[2].isreg
)
9962 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
9963 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
9964 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
9965 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
9969 inst
.instruction
|= inst
.operands
[0].reg
;
9970 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
9976 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
9977 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
9978 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
9981 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
9982 inst
.instruction
|= inst
.operands
[0].reg
;
9983 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9989 constraint (inst
.operands
[0].reg
> 7
9990 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
9991 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9993 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
9995 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
9996 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
9997 _("source1 and dest must be same register"));
9999 switch (inst
.instruction
)
10001 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
10002 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
10003 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
10004 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
10008 inst
.instruction
|= inst
.operands
[0].reg
;
10009 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
10013 switch (inst
.instruction
)
10015 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10016 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10017 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10018 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
10021 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10022 inst
.instruction
|= inst
.operands
[0].reg
;
10023 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;

  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
              _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
        {
          if (inst.operands[3].shift_kind == SHIFT_ASR)
            inst.instruction |= 0x00200000;  /* sh bit */
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
        }
      inst.reloc.type = BFD_RELOC_UNUSED;
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= inst.operands[2].reg << 16;
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
              || inst.operands[2].postind || inst.operands[2].writeback
              || inst.operands[2].immisreg || inst.operands[2].shifted
              || inst.operands[2].negative,
              /* ... */);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
              || inst.operands[0].reg == inst.operands[2].reg
              || inst.operands[0].reg == inst.operands[3].reg
              || inst.operands[1].reg == inst.operands[2].reg,
              /* ... */);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 4;
  if (inst.instruction <= 0xffff && inst.size_req != 4
      && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
        inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
                  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }

  inst.reloc.type = BFD_RELOC_ARM_SWI;
  half = (inst.instruction & 0x10) != 0;
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
              _("instruction requires register index"));
  constraint (inst.operands[0].imm == 15,
              _("PC is not a valid index register"));
  constraint (!half && inst.operands[0].shifted,
              _("instruction does not allow shifted index"));
  inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
        {
          if (inst.operands[3].shift_kind == SHIFT_ASR)
            inst.instruction |= 0x00200000;  /* sh bit */
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
          inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
        }
      inst.reloc.type = BFD_RELOC_UNUSED;
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= inst.operands[2].reg << 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
10216 #define NEON_ENC_TAB \
10217 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10218 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10219 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10220 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10221 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10222 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10223 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10224 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10225 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10226 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10227 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10228 /* Register variants of the following two instructions are encoded as
10229 vcge / vcgt with the operands reversed. */ \
10230 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10231 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10232 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10233 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10234 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10235 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10236 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10237 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10238 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10239 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10240 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10241 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10242 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10243 X(vshl, 0x0000400, N_INV, 0x0800510), \
10244 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10245 X(vand, 0x0000110, N_INV, 0x0800030), \
10246 X(vbic, 0x0100110, N_INV, 0x0800030), \
10247 X(veor, 0x1000110, N_INV, N_INV), \
10248 X(vorn, 0x0300110, N_INV, 0x0800010), \
10249 X(vorr, 0x0200110, N_INV, 0x0800010), \
10250 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10251 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10252 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10253 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10254 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10255 X(vst1, 0x0000000, 0x0800000, N_INV), \
10256 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10257 X(vst2, 0x0000100, 0x0800100, N_INV), \
10258 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10259 X(vst3, 0x0000200, 0x0800200, N_INV), \
10260 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10261 X(vst4, 0x0000300, 0x0800300, N_INV), \
10262 X(vmovn, 0x1b20200, N_INV, N_INV), \
10263 X(vtrn, 0x1b20080, N_INV, N_INV), \
10264 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10265 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10266 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10267 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10268 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10269 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10270 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10271 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10272 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
#define X(OPC,I,F,S) N_MNEM_##OPC

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
  NEON_ENC_TAB
};

#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
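/* Typical use (see the encoders later in this file): the low 28 bits of
   inst.instruction still hold the N_MNEM_ value for the mnemonic, so an
   encoder selects the variant it needs with, e.g.,

     inst.instruction = NEON_ENC_INTEGER (inst.instruction);

   and then ORs in the register and size fields.  NEON_ENC_SINGLE and
   NEON_ENC_DOUBLE additionally preserve the top four (condition) bits.  */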
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape;
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */
10320 #define NEON_SHAPE_DEF \
10321 X(3, (D, D, D), DOUBLE), \
10322 X(3, (Q, Q, Q), QUAD), \
10323 X(3, (D, D, I), DOUBLE), \
10324 X(3, (Q, Q, I), QUAD), \
10325 X(3, (D, D, S), DOUBLE), \
10326 X(3, (Q, Q, S), QUAD), \
10327 X(2, (D, D), DOUBLE), \
10328 X(2, (Q, Q), QUAD), \
10329 X(2, (D, S), DOUBLE), \
10330 X(2, (Q, S), QUAD), \
10331 X(2, (D, R), DOUBLE), \
10332 X(2, (Q, R), QUAD), \
10333 X(2, (D, I), DOUBLE), \
10334 X(2, (Q, I), QUAD), \
10335 X(3, (D, L, D), DOUBLE), \
10336 X(2, (D, Q), MIXED), \
10337 X(2, (Q, D), MIXED), \
10338 X(3, (D, Q, I), MIXED), \
10339 X(3, (Q, D, I), MIXED), \
10340 X(3, (Q, D, D), MIXED), \
10341 X(3, (D, Q, Q), MIXED), \
10342 X(3, (Q, Q, D), MIXED), \
10343 X(3, (Q, D, S), MIXED), \
10344 X(3, (D, Q, S), MIXED), \
10345 X(4, (D, D, D, I), DOUBLE), \
10346 X(4, (Q, Q, Q, I), QUAD), \
10347 X(2, (F, F), SINGLE), \
10348 X(3, (F, F, F), SINGLE), \
10349 X(2, (F, I), SINGLE), \
10350 X(2, (F, D), MIXED), \
10351 X(2, (D, F), MIXED), \
10352 X(3, (F, F, I), MIXED), \
10353 X(4, (R, R, F, F), SINGLE), \
10354 X(4, (F, F, R, R), SINGLE), \
10355 X(3, (D, R, R), DOUBLE), \
10356 X(3, (R, R, D), DOUBLE), \
10357 X(2, (S, R), SINGLE), \
10358 X(2, (R, S), SINGLE), \
10359 X(2, (F, R), SINGLE), \
10360 X(2, (R, F), SINGLE)
#define S2(A,B)     NS_##A##B
#define S3(A,B,C)   NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D

#define X(N, L, C) S##N L

/* ... */

enum neon_shape_class
{
  /* ... */
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  /* ... */
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  /* ... */
};

struct neon_shape_info
{
  int els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)     { SE_##A, SE_##B }
#define S3(A,B,C)   { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
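/* Reading the shape names (sketch, following the key in the comment above):
   each NS_xxx enumerator spells out its operand kinds, so NS_DDI describes
   an instruction taking two Neon D registers and an immediate (the shape
   picked by calls such as neon_select_shape (NS_DDI, NS_QQI, NS_NULL) in the
   immediate-shift encoders below), while NS_FFF is the all-single-precision
   VFP three-operand shape used by the fadds/fsubs style helpers.  */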
/* Bit masks used in type checking given instructions.
   'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  /* ... */
  N_KEY = 0x100000, /* key element (main type specifier).  */
  N_EQK = 0x200000, /* given operand has the same type & size as the key.  */
  N_VFP = 0x400000, /* VFP mode: operand size must match register width.  */
  N_DBL = 0x000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF = 0x000002, /* if N_EQK, this operand is half the size.  */
  N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_MAX_NONSPECIAL = N_F64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
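/* Example of how these masks are combined (illustrative): a call such as
   neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY) marks the third
   operand as the key and allows it to be written as any of
   .s8/.s16/.s32/.u8/.u16/.u32, while the first two operands (N_EQK) simply
   inherit the key's type and size.  */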
10496 /* Select a "shape" for the current instruction (describing register types or
10497 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10498 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10499 function of operand parsing, so this function doesn't need to be called.
10500 Shapes should be listed in order of decreasing length. */
10502 static enum neon_shape
10503 neon_select_shape (enum neon_shape shape
, ...)
10506 enum neon_shape first_shape
= shape
;
10508 /* Fix missing optional operands. FIXME: we don't know at this point how
10509 many arguments we should have, so this makes the assumption that we have
10510 > 1. This is true of all current Neon opcodes, I think, but may not be
10511 true in the future. */
10512 if (!inst
.operands
[1].present
)
10513 inst
.operands
[1] = inst
.operands
[0];
10515 va_start (ap
, shape
);
10517 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
10522 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
10524 if (!inst
.operands
[j
].present
)
10530 switch (neon_shape_tab
[shape
].el
[j
])
10533 if (!(inst
.operands
[j
].isreg
10534 && inst
.operands
[j
].isvec
10535 && inst
.operands
[j
].issingle
10536 && !inst
.operands
[j
].isquad
))
10541 if (!(inst
.operands
[j
].isreg
10542 && inst
.operands
[j
].isvec
10543 && !inst
.operands
[j
].isquad
10544 && !inst
.operands
[j
].issingle
))
10549 if (!(inst
.operands
[j
].isreg
10550 && !inst
.operands
[j
].isvec
))
10555 if (!(inst
.operands
[j
].isreg
10556 && inst
.operands
[j
].isvec
10557 && inst
.operands
[j
].isquad
10558 && !inst
.operands
[j
].issingle
))
10563 if (!(!inst
.operands
[j
].isreg
10564 && !inst
.operands
[j
].isscalar
))
10569 if (!(!inst
.operands
[j
].isreg
10570 && inst
.operands
[j
].isscalar
))
10584 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
10585 first_error (_("invalid instruction shape"));
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
                       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
        *g_size /= 2;
      else if ((typebits & N_DBL) != 0)
        *g_size *= 2;
      if ((typebits & N_SGN) != 0)
        *g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
        *g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
        *g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
        *g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
        *g_type = NT_untyped;
    }
}

/* Return operand OPNO promoted by bits set in THISARG.  KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);
  return dest;
}
10640 /* Convert Neon type and size into compact bitmask representation. */
10642 static enum neon_type_mask
10643 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
10650 case 8: return N_8
;
10651 case 16: return N_16
;
10652 case 32: return N_32
;
10653 case 64: return N_64
;
10661 case 8: return N_I8
;
10662 case 16: return N_I16
;
10663 case 32: return N_I32
;
10664 case 64: return N_I64
;
10672 case 32: return N_F32
;
10673 case 64: return N_F64
;
10681 case 8: return N_P8
;
10682 case 16: return N_P16
;
10690 case 8: return N_S8
;
10691 case 16: return N_S16
;
10692 case 32: return N_S32
;
10693 case 64: return N_S64
;
10701 case 8: return N_U8
;
10702 case 16: return N_U16
;
10703 case 32: return N_U32
;
10704 case 64: return N_U64
;
10715 /* Convert compact Neon bitmask type representation to a type and size. Only
10716 handles the case where a single bit is set in the mask. */
10719 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
10720 enum neon_type_mask mask
)
10722 if ((mask
& N_EQK
) != 0)
10725 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
10727 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
10729 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
10731 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
10736 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
10738 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
10739 *type
= NT_unsigned
;
10740 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
10741 *type
= NT_integer
;
10742 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
10743 *type
= NT_untyped
;
10744 else if ((mask
& (N_P8
| N_P16
)) != 0)
10746 else if ((mask
& (N_F32
| N_F64
)) != 0)
/* Modify a bitmask of allowed types.  This is only needed for type
   relaxation.  */

static unsigned
modify_types_allowed (unsigned allowed, unsigned mods)
{
  unsigned size;
  enum neon_el_type type;
  unsigned destmask = 0;
  int i;

  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
    {
      if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
        {
          neon_modify_type_size (mods, &type, &size);
          destmask |= type_chk_of_el_type (type, size);
        }
    }

  return destmask;
}
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type of:
   - the second operand, if there is one
   - the operand, if there is no second operand
   - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands).  All
   Neon instructions should call it before performing bit encoding.  */
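/* For instance (see do_neon_dyadic_i_su and friends below): writing
   "vadd.i32 q0, q1, q2" makes .i32 the key type; neon_check_type propagates
   it to the N_EQK operands, checks it against the instruction's allowed
   mask, and returns a struct neon_type_el whose .type and .size the encoder
   then feeds into helpers such as neon_three_same.  */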
10791 static struct neon_type_el
10792 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
10795 unsigned i
, pass
, key_el
= 0;
10796 unsigned types
[NEON_MAX_TYPE_ELS
];
10797 enum neon_el_type k_type
= NT_invtype
;
10798 unsigned k_size
= -1u;
10799 struct neon_type_el badtype
= {NT_invtype
, -1};
10800 unsigned key_allowed
= 0;
10802 /* Optional registers in Neon instructions are always (not) in operand 1.
10803 Fill in the missing operand here, if it was omitted. */
10804 if (els
> 1 && !inst
.operands
[1].present
)
10805 inst
.operands
[1] = inst
.operands
[0];
10807 /* Suck up all the varargs. */
10809 for (i
= 0; i
< els
; i
++)
10811 unsigned thisarg
= va_arg (ap
, unsigned);
10812 if (thisarg
== N_IGNORE_TYPE
)
10817 types
[i
] = thisarg
;
10818 if ((thisarg
& N_KEY
) != 0)
10823 if (inst
.vectype
.elems
> 0)
10824 for (i
= 0; i
< els
; i
++)
10825 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
10827 first_error (_("types specified in both the mnemonic and operands"));
10831 /* Duplicate inst.vectype elements here as necessary.
10832 FIXME: No idea if this is exactly the same as the ARM assembler,
10833 particularly when an insn takes one register and one non-register
10835 if (inst
.vectype
.elems
== 1 && els
> 1)
10838 inst
.vectype
.elems
= els
;
10839 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
10840 for (j
= 0; j
< els
; j
++)
10842 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10845 else if (inst
.vectype
.elems
== 0 && els
> 0)
10848 /* No types were given after the mnemonic, so look for types specified
10849 after each operand. We allow some flexibility here; as long as the
10850 "key" operand has a type, we can infer the others. */
10851 for (j
= 0; j
< els
; j
++)
10852 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
10853 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
10855 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
10857 for (j
= 0; j
< els
; j
++)
10858 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
10859 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
10864 first_error (_("operand types can't be inferred"));
10868 else if (inst
.vectype
.elems
!= els
)
10870 first_error (_("type specifier has the wrong number of parts"));
10874 for (pass
= 0; pass
< 2; pass
++)
10876 for (i
= 0; i
< els
; i
++)
10878 unsigned thisarg
= types
[i
];
10879 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
10880 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
10881 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
10882 unsigned g_size
= inst
.vectype
.el
[i
].size
;
10884 /* Decay more-specific signed & unsigned types to sign-insensitive
10885 integer types if sign-specific variants are unavailable. */
10886 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
10887 && (types_allowed
& N_SU_ALL
) == 0)
10888 g_type
= NT_integer
;
10890 /* If only untyped args are allowed, decay any more specific types to
10891 them. Some instructions only care about signs for some element
10892 sizes, so handle that properly. */
10893 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
10894 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
10895 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
10896 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
10897 g_type
= NT_untyped
;
10901 if ((thisarg
& N_KEY
) != 0)
10905 key_allowed
= thisarg
& ~N_KEY
;
10910 if ((thisarg
& N_VFP
) != 0)
10912 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
10913 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
10915 /* In VFP mode, operands must match register widths. If we
10916 have a key operand, use its width, else use the width of
10917 the current operand. */
10923 if (regwidth
!= match
)
10925 first_error (_("operand size must match register width"));
10930 if ((thisarg
& N_EQK
) == 0)
10932 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
10934 if ((given_type
& types_allowed
) == 0)
10936 first_error (_("bad type in Neon instruction"));
10942 enum neon_el_type mod_k_type
= k_type
;
10943 unsigned mod_k_size
= k_size
;
10944 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
10945 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
10947 first_error (_("inconsistent types in Neon instruction"));
10955 return inst
.vectype
.el
[key_el
];
10958 /* Neon-style VFP instruction forwarding. */
10960 /* Thumb VFP instructions have 0xE in the condition field. */
10963 do_vfp_cond_or_thumb (void)
10966 inst
.instruction
|= 0xe0000000;
10968 inst
.instruction
|= inst
.cond
<< 28;
10971 /* Look up and encode a simple mnemonic, for use as a helper function for the
10972 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10973 etc. It is assumed that operand parsing has already been done, and that the
10974 operands are in the form expected by the given opcode (this isn't necessarily
10975 the same as the form in which they were parsed, hence some massaging must
10976 take place before this function is called).
10977 Checks current arch version against that in the looked-up opcode. */
10980 do_vfp_nsyn_opcode (const char *opname
)
10982 const struct asm_opcode
*opcode
;
10984 opcode
= hash_find (arm_ops_hsh
, opname
);
10989 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
10990 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
10995 inst
.instruction
= opcode
->tvalue
;
10996 opcode
->tencode ();
11000 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
11001 opcode
->aencode ();
11006 do_vfp_nsyn_add_sub (enum neon_shape rs
)
11008 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
11013 do_vfp_nsyn_opcode ("fadds");
11015 do_vfp_nsyn_opcode ("fsubs");
11020 do_vfp_nsyn_opcode ("faddd");
11022 do_vfp_nsyn_opcode ("fsubd");
11026 /* Check operand types to see if this is a VFP instruction, and if so call
11030 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
11032 enum neon_shape rs
;
11033 struct neon_type_el et
;
11038 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11039 et
= neon_check_type (2, rs
,
11040 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11044 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11045 et
= neon_check_type (3, rs
,
11046 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11053 if (et
.type
!= NT_invtype
)
11065 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
11067 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
11072 do_vfp_nsyn_opcode ("fmacs");
11074 do_vfp_nsyn_opcode ("fmscs");
11079 do_vfp_nsyn_opcode ("fmacd");
11081 do_vfp_nsyn_opcode ("fmscd");
11086 do_vfp_nsyn_mul (enum neon_shape rs
)
11089 do_vfp_nsyn_opcode ("fmuls");
11091 do_vfp_nsyn_opcode ("fmuld");
11095 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
11097 int is_neg
= (inst
.instruction
& 0x80) != 0;
11098 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
11103 do_vfp_nsyn_opcode ("fnegs");
11105 do_vfp_nsyn_opcode ("fabss");
11110 do_vfp_nsyn_opcode ("fnegd");
11112 do_vfp_nsyn_opcode ("fabsd");
11116 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11117 insns belong to Neon, and are handled elsewhere. */
11120 do_vfp_nsyn_ldm_stm (int is_dbmode
)
11122 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
11126 do_vfp_nsyn_opcode ("fldmdbs");
11128 do_vfp_nsyn_opcode ("fldmias");
11133 do_vfp_nsyn_opcode ("fstmdbs");
11135 do_vfp_nsyn_opcode ("fstmias");
11140 do_vfp_nsyn_sqrt (void)
11142 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11143 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11146 do_vfp_nsyn_opcode ("fsqrts");
11148 do_vfp_nsyn_opcode ("fsqrtd");
11152 do_vfp_nsyn_div (void)
11154 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11155 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
11156 N_F32
| N_F64
| N_KEY
| N_VFP
);
11159 do_vfp_nsyn_opcode ("fdivs");
11161 do_vfp_nsyn_opcode ("fdivd");
11165 do_vfp_nsyn_nmul (void)
11167 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
11168 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
11169 N_F32
| N_F64
| N_KEY
| N_VFP
);
11173 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11174 do_vfp_sp_dyadic ();
11178 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11179 do_vfp_dp_rd_rn_rm ();
11181 do_vfp_cond_or_thumb ();
11185 do_vfp_nsyn_cmp (void)
11187 if (inst
.operands
[1].isreg
)
11189 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
11190 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
11194 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11195 do_vfp_sp_monadic ();
11199 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11200 do_vfp_dp_rd_rm ();
11205 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
11206 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
11208 switch (inst
.instruction
& 0x0fffffff)
11211 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
11214 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
11222 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
11223 do_vfp_sp_compare_z ();
11227 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
11231 do_vfp_cond_or_thumb ();
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = 13;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
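/* Usage sketch (an assumption about intent, based on the fields set above):
   nsyn_insert_sp rewrites a one-operand "vpush {...}" / "vpop {...}" style
   instruction into the two-operand "sp!, {...}" form, so that the fldm/fstm
   helpers invoked by do_vfp_nsyn_push and do_vfp_nsyn_pop below can treat it
   like any other load/store-multiple with writeback.  */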
static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmdbs");
  else
    do_vfp_nsyn_opcode ("fldmdbd");
}
/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static unsigned
neon_dp_fixup (unsigned i)
{
  /* The U bit is at bit 24 by default.  Move to bit 28 in Thumb mode.  */
  /* ... */
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}

#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
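/* Worked example (for illustration): Neon register d17 has register number
   17 = 0b10001, so LOW4 (17) == 1 supplies the 4-bit register field and
   HI1 (17) == 1 supplies the single extension bit (D, N or M) that the
   encoders below place separately, e.g. LOW4 (reg) << 12 and
   HI1 (reg) << 22 for the destination in neon_three_same.  */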
/* Encode insns with bit pattern:

   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
   different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11327 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11329 Don't write size if SIZE == -1. */
11332 neon_two_same (int qbit
, int ubit
, int size
)
11334 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11335 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11336 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11337 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11338 inst
.instruction
|= (qbit
!= 0) << 6;
11339 inst
.instruction
|= (ubit
!= 0) << 24;
11342 inst
.instruction
|= neon_logbits (size
) << 18;
11344 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11347 /* Neon instruction encoders, in approximate order of appearance. */
11350 do_neon_dyadic_i_su (void)
11352 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11353 struct neon_type_el et
= neon_check_type (3, rs
,
11354 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
11355 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11359 do_neon_dyadic_i64_su (void)
11361 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11362 struct neon_type_el et
= neon_check_type (3, rs
,
11363 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
11364 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11368 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
11371 unsigned size
= et
.size
>> 3;
11372 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11373 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11374 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11375 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11376 inst
.instruction
|= (isquad
!= 0) << 6;
11377 inst
.instruction
|= immbits
<< 16;
11378 inst
.instruction
|= (size
>> 3) << 7;
11379 inst
.instruction
|= (size
& 0x7) << 19;
11381 inst
.instruction
|= (uval
!= 0) << 24;
11383 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11387 do_neon_shl_imm (void)
11389 if (!inst
.operands
[2].isreg
)
11391 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11392 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
11393 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11394 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, inst
.operands
[2].imm
);
11398 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11399 struct neon_type_el et
= neon_check_type (3, rs
,
11400 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
11403 /* VSHL/VQSHL 3-register variants have syntax such as:
11405 whereas other 3-register operations encoded by neon_three_same have
11408 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
11410 tmp
= inst
.operands
[2].reg
;
11411 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
11412 inst
.operands
[1].reg
= tmp
;
11413 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11414 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11419 do_neon_qshl_imm (void)
11421 if (!inst
.operands
[2].isreg
)
11423 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11424 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
11426 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11427 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
11428 inst
.operands
[2].imm
);
11432 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11433 struct neon_type_el et
= neon_check_type (3, rs
,
11434 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
11437 /* See note in do_neon_shl_imm. */
11438 tmp
= inst
.operands
[2].reg
;
11439 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
11440 inst
.operands
[1].reg
= tmp
;
11441 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11442 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11447 do_neon_rshl (void)
11449 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11450 struct neon_type_el et
= neon_check_type (3, rs
,
11451 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
11454 tmp
= inst
.operands
[2].reg
;
11455 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
11456 inst
.operands
[1].reg
= tmp
;
11457 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
11461 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
11463 /* Handle .I8 pseudo-instructions. */
11466 /* Unfortunately, this will make everything apart from zero out-of-range.
11467 FIXME is this the intended semantics? There doesn't seem much point in
11468 accepting .I8 if so. */
11469 immediate
|= immediate
<< 8;
11475 if (immediate
== (immediate
& 0x000000ff))
11477 *immbits
= immediate
;
11480 else if (immediate
== (immediate
& 0x0000ff00))
11482 *immbits
= immediate
>> 8;
11485 else if (immediate
== (immediate
& 0x00ff0000))
11487 *immbits
= immediate
>> 16;
11490 else if (immediate
== (immediate
& 0xff000000))
11492 *immbits
= immediate
>> 24;
11495 if ((immediate
& 0xffff) != (immediate
>> 16))
11496 goto bad_immediate
;
11497 immediate
&= 0xffff;
11500 if (immediate
== (immediate
& 0x000000ff))
11502 *immbits
= immediate
;
11505 else if (immediate
== (immediate
& 0x0000ff00))
11507 *immbits
= immediate
>> 8;
11512 first_error (_("immediate value out of range"));
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
         && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
         && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
         && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
         | ((imm & 0x01000000) >> 21);
}

/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
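/* Concrete illustration (example values only): the IEEE single 1.0f is
   0x3f800000, which is_quarter_float accepts, and
   neon_qfloat_bits (0x3f800000) == ((0x3f800000 >> 19) & 0x7f) | 0 == 0x70,
   i.e. the 8-bit "abcdefgh" immediate 0b01110000 used by the VMOV
   floating-point immediate encoding for 1.0.  */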
11545 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11546 the instruction. *OP is passed as the initial value of the op field, and
11547 may be set to a different value depending on the constant (i.e.
11548 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11549 MVN). If the immediate looks like a repeated parttern then also
11550 try smaller element sizes. */
11553 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, unsigned *immbits
,
11554 int *op
, int size
, enum neon_el_type type
)
11556 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
11558 if (size
!= 32 || *op
== 1)
11560 *immbits
= neon_qfloat_bits (immlo
);
11566 if (neon_bits_same_in_bytes (immhi
)
11567 && neon_bits_same_in_bytes (immlo
))
11571 *immbits
= (neon_squash_bits (immhi
) << 4)
11572 | neon_squash_bits (immlo
);
11577 if (immhi
!= immlo
)
11583 if (immlo
== (immlo
& 0x000000ff))
11588 else if (immlo
== (immlo
& 0x0000ff00))
11590 *immbits
= immlo
>> 8;
11593 else if (immlo
== (immlo
& 0x00ff0000))
11595 *immbits
= immlo
>> 16;
11598 else if (immlo
== (immlo
& 0xff000000))
11600 *immbits
= immlo
>> 24;
11603 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
11605 *immbits
= (immlo
>> 8) & 0xff;
11608 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
11610 *immbits
= (immlo
>> 16) & 0xff;
11614 if ((immlo
& 0xffff) != (immlo
>> 16))
11621 if (immlo
== (immlo
& 0x000000ff))
11626 else if (immlo
== (immlo
& 0x0000ff00))
11628 *immbits
= immlo
>> 8;
11632 if ((immlo
& 0xff) != (immlo
>> 8))
11637 if (immlo
== (immlo
& 0x000000ff))
11639 /* Don't allow MVN with 8-bit immediate. */
11649 /* Write immediate bits [7:0] to the following locations:
11651 |28/24|23 19|18 16|15 4|3 0|
11652 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11654 This function is used by VMOV/VMVN/VORR/VBIC. */
11657 neon_write_immbits (unsigned immbits
)
11659 inst
.instruction
|= immbits
& 0xf;
11660 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
11661 inst
.instruction
|= ((immbits
>> 7) & 0x1) << 24;
11664 /* Invert low-order SIZE bits of XHI:XLO. */
11667 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
11669 unsigned immlo
= xlo
? *xlo
: 0;
11670 unsigned immhi
= xhi
? *xhi
: 0;
11675 immlo
= (~immlo
) & 0xff;
11679 immlo
= (~immlo
) & 0xffff;
11683 immhi
= (~immhi
) & 0xffffffff;
11684 /* fall through. */
11687 immlo
= (~immlo
) & 0xffffffff;
11702 do_neon_logic (void)
11704 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
11706 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11707 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11708 /* U bit and size field were set as part of the bitmask. */
11709 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11710 neon_three_same (neon_quad (rs
), 0, -1);
11714 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
11715 struct neon_type_el et
= neon_check_type (2, rs
,
11716 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
11717 enum neon_opc opcode
= inst
.instruction
& 0x0fffffff;
11721 if (et
.type
== NT_invtype
)
11724 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11726 immbits
= inst
.operands
[1].imm
;
11729 /* .i64 is a pseudo-op, so the immediate must be a repeating
11731 if (immbits
!= (inst
.operands
[1].regisimm
?
11732 inst
.operands
[1].reg
: 0))
11734 /* Set immbits to an invalid constant. */
11735 immbits
= 0xdeadbeef;
11742 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11746 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11750 /* Pseudo-instruction for VBIC. */
11751 neon_invert_size (&immbits
, 0, et
.size
);
11752 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11756 /* Pseudo-instruction for VORR. */
11757 neon_invert_size (&immbits
, 0, et
.size
);
11758 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
11768 inst
.instruction
|= neon_quad (rs
) << 6;
11769 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11770 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11771 inst
.instruction
|= cmode
<< 8;
11772 neon_write_immbits (immbits
);
11774 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
11779 do_neon_bitfield (void)
11781 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11782 neon_check_type (3, rs
, N_IGNORE_TYPE
);
11783 neon_three_same (neon_quad (rs
), 0, -1);
11787 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
11790 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
11791 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
11793 if (et
.type
== NT_float
)
11795 inst
.instruction
= NEON_ENC_FLOAT (inst
.instruction
);
11796 neon_three_same (neon_quad (rs
), 0, -1);
11800 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
11801 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
11806 do_neon_dyadic_if_su (void)
11808 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11812 do_neon_dyadic_if_su_d (void)
11814 /* This version only allow D registers, but that constraint is enforced during
11815 operand parsing so we don't need to do anything extra here. */
11816 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
11820 do_neon_dyadic_if_i_d (void)
11822 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11823 affected if we specify unsigned args. */
11824 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
11827 enum vfp_or_neon_is_neon_bits
11830 NEON_CHECK_ARCH
= 2
11833 /* Call this function if an instruction which may have belonged to the VFP or
11834 Neon instruction sets, but turned out to be a Neon instruction (due to the
11835 operand types involved, etc.). We have to check and/or fix-up a couple of
11838 - Make sure the user hasn't attempted to make a Neon instruction
11840 - Alter the value in the condition code field if necessary.
11841 - Make sure that the arch supports Neon instructions.
11843 Which of these operations take place depends on bits from enum
11844 vfp_or_neon_is_neon_bits.
11846 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11847 current instruction's condition is COND_ALWAYS, the condition field is
11848 changed to inst.uncond_value. This is necessary because instructions shared
11849 between VFP and Neon may be conditional for the VFP variants only, and the
11850 unconditional Neon version must have, e.g., 0xF in the condition field. */
11853 vfp_or_neon_is_neon (unsigned check
)
11855 /* Conditions are always legal in Thumb mode (IT blocks). */
11856 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
11858 if (inst
.cond
!= COND_ALWAYS
)
11860 first_error (_(BAD_COND
));
11863 if (inst
.uncond_value
!= -1)
11864 inst
.instruction
|= inst
.uncond_value
<< 28;
11867 if ((check
& NEON_CHECK_ARCH
)
11868 && !ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
11870 first_error (_(BAD_FPU
));
11878 do_neon_addsub_if_i (void)
11880 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
11883 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
11886 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11887 affected if we specify unsigned args. */
11888 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
11891 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11893 V<op> A,B (A is operand 0, B is operand 2)
11898 so handle that case specially. */
11901 neon_exchange_operands (void)
11903 void *scratch
= alloca (sizeof (inst
.operands
[0]));
11904 if (inst
.operands
[1].present
)
11906 /* Swap operands[1] and operands[2]. */
11907 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
11908 inst
.operands
[1] = inst
.operands
[2];
11909 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
11913 inst
.operands
[1] = inst
.operands
[2];
11914 inst
.operands
[2] = inst
.operands
[0];
11919 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
11921 if (inst
.operands
[2].isreg
)
11924 neon_exchange_operands ();
11925 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
11929 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
11930 struct neon_type_el et
= neon_check_type (2, rs
,
11931 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
11933 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
11934 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
11935 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
11936 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
11937 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
11938 inst
.instruction
|= neon_quad (rs
) << 6;
11939 inst
.instruction
|= (et
.type
== NT_float
) << 10;
11940 inst
.instruction
|= neon_logbits (et
.size
) << 18;
11942 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

  neon_compare (N_IF_32, N_IF_32, FALSE);
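/* That is (summarising the NEON_ENC_TAB comment above): vclt and vcle have no
   register-form encodings of their own.  When operand 2 is a register,
   neon_exchange_operands swaps the operands and the instruction is emitted as
   the corresponding vcgt / vcge, e.g. "vclt.s32 d0, d1, d2" is assembled as
   "vcgt.s32 d0, d2, d1"; the compare-with-immediate (zero) forms use the
   dedicated encodings from the table.  */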
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        break;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        break;
      return regno | (elno << 4);
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
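/* Example (illustrative only): for a 16-bit-element scalar such as d1[2],
   NEON_SCALAR_REG gives regno == 1 and NEON_SCALAR_INDEX gives elno == 2,
   both in range, so the function returns 1 | (2 << 3) == 0x11: Rm[2:0]
   holds the register and M:Rm[3] (here 0b10) holds the lane index, as
   described in the comment above.  */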
11996 /* Encode multiply / multiply-accumulate scalar instructions. */
11999 neon_mul_mac (struct neon_type_el et
, int ubit
)
12003 /* Give a more helpful error message if we have an invalid type. */
12004 if (et
.type
== NT_invtype
)
12007 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
12008 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12009 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12010 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12011 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12012 inst
.instruction
|= LOW4 (scalar
);
12013 inst
.instruction
|= HI1 (scalar
) << 5;
12014 inst
.instruction
|= (et
.type
== NT_float
) << 8;
12015 inst
.instruction
|= neon_logbits (et
.size
) << 20;
12016 inst
.instruction
|= (ubit
!= 0) << 24;
12018 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12022 do_neon_mac_maybe_scalar (void)
12024 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
12027 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12030 if (inst
.operands
[2].isscalar
)
12032 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
12033 struct neon_type_el et
= neon_check_type (3, rs
,
12034 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
12035 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12036 neon_mul_mac (et
, neon_quad (rs
));
12040 /* The "untyped" case can't happen. Do this to stop the "U" bit being
12041 affected if we specify unsigned args. */
12042 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
12049 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12050 struct neon_type_el et
= neon_check_type (3, rs
,
12051 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12052 neon_three_same (neon_quad (rs
), 0, et
.size
);
12055 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12056 same types as the MAC equivalents. The polynomial type for this instruction
12057 is encoded the same as the integer type. */
12062 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
12065 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12068 if (inst
.operands
[2].isscalar
)
12069 do_neon_mac_maybe_scalar ();
12071 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
12075 do_neon_qdmulh (void)
12077 if (inst
.operands
[2].isscalar
)
12079 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
12080 struct neon_type_el et
= neon_check_type (3, rs
,
12081 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
12082 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12083 neon_mul_mac (et
, neon_quad (rs
));
12087 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12088 struct neon_type_el et
= neon_check_type (3, rs
,
12089 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
12090 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12091 /* The U bit (rounding) comes from bit mask. */
12092 neon_three_same (neon_quad (rs
), 0, et
.size
);
12097 do_neon_fcmp_absolute (void)
12099 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12100 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
12101 /* Size field comes from bit mask. */
12102 neon_three_same (neon_quad (rs
), 1, -1);
12106 do_neon_fcmp_absolute_inv (void)
12108 neon_exchange_operands ();
12109 do_neon_fcmp_absolute ();
12113 do_neon_step (void)
12115 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12116 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
12117 neon_three_same (neon_quad (rs
), 0, -1);
12121 do_neon_abs_neg (void)
12123 enum neon_shape rs
;
12124 struct neon_type_el et
;
12126 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
12129 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12132 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12133 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
12135 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12136 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12137 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12138 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12139 inst
.instruction
|= neon_quad (rs
) << 6;
12140 inst
.instruction
|= (et
.type
== NT_float
) << 10;
12141 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12143 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12149 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12150 struct neon_type_el et
= neon_check_type (2, rs
,
12151 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12152 int imm
= inst
.operands
[2].imm
;
12153 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
12154 _("immediate out of range for insert"));
12155 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
12161 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12162 struct neon_type_el et
= neon_check_type (2, rs
,
12163 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12164 int imm
= inst
.operands
[2].imm
;
12165 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12166 _("immediate out of range for insert"));
12167 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
12171 do_neon_qshlu_imm (void)
12173 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
12174 struct neon_type_el et
= neon_check_type (2, rs
,
12175 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
12176 int imm
= inst
.operands
[2].imm
;
12177 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
12178 _("immediate out of range for shift"));
12179 /* Only encodes the 'U present' variant of the instruction.
12180 In this case, signed types have OP (bit 8) set to 0.
12181 Unsigned types have OP set to 1. */
12182 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
12183 /* The rest of the bits are the same as other immediate shifts. */
12184 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
12188 do_neon_qmovn (void)
12190   struct neon_type_el et = neon_check_type (2, NS_DQ,
12191     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12192   /* Saturating move where operands can be signed or unsigned, and the
12193      destination has the same signedness.  */
12194   inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12195   if (et.type == NT_unsigned)
12196     inst.instruction |= 0xc0;
12198     inst.instruction |= 0x80;
12199   neon_two_same (0, 1, et.size / 2);
12203 do_neon_qmovun (void)
12205   struct neon_type_el et = neon_check_type (2, NS_DQ,
12206     N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12207   /* Saturating move with unsigned results.  Operands must be signed.  */
12208   inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12209   neon_two_same (0, 1, et.size / 2);
12213 do_neon_rshift_sat_narrow (void)
12215 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12216 or unsigned. If operands are unsigned, results must also be unsigned. */
12217 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12218 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
12219 int imm
= inst
.operands
[2].imm
;
12220 /* This gets the bounds check, size encoding and immediate bits calculation
12224 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12225 VQMOVN.I<size> <Dd>, <Qm>. */
12228 inst
.operands
[2].present
= 0;
12229 inst
.instruction
= N_MNEM_vqmovn
;
12234 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12235 _("immediate out of range"));
12236 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
12240 do_neon_rshift_sat_narrow_u (void)
12242 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12243 or unsigned. If operands are unsigned, results must also be unsigned. */
12244 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12245 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
12246 int imm
= inst
.operands
[2].imm
;
12247 /* This gets the bounds check, size encoding and immediate bits calculation
12251 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12252 VQMOVUN.I<size> <Dd>, <Qm>. */
12255 inst
.operands
[2].present
= 0;
12256 inst
.instruction
= N_MNEM_vqmovun
;
12261 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12262 _("immediate out of range"));
12263 /* FIXME: The manual is kind of unclear about what value U should have in
12264 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12266 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
12270 do_neon_movn (void)
12272 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
12273 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
12274 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12275 neon_two_same (0, 1, et
.size
/ 2);
12279 do_neon_rshift_narrow (void)
12281 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
12282 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
12283 int imm
= inst
.operands
[2].imm
;
12284 /* This gets the bounds check, size encoding and immediate bits calculation
12288 /* If immediate is zero then we are a pseudo-instruction for
12289 VMOVN.I<size> <Dd>, <Qm> */
12292 inst
.operands
[2].present
= 0;
12293 inst
.instruction
= N_MNEM_vmovn
;
12298 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
12299 _("immediate out of range for narrowing operation"));
12300 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
12304 do_neon_shll (void)
12306 /* FIXME: Type checking when lengthening. */
12307 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
12308 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
12309 unsigned imm
= inst
.operands
[2].imm
;
12311 if (imm
== et
.size
)
12313 /* Maximum shift variant. */
12314 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12315 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12316 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12317 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12318 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12319 inst
.instruction
|= neon_logbits (et
.size
) << 18;
12321 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12325 /* A more-specific type check for non-max versions. */
12326 et
= neon_check_type (2, NS_QDI
,
12327 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
12328 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12329 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
12333 /* Check the various types for the VCVT instruction, and return which version
12334 the current instruction is. */
12337 neon_cvt_flavour (enum neon_shape rs
)
12339 #define CVT_VAR(C,X,Y) \
12340 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12341 if (et.type != NT_invtype) \
12343 inst.error = NULL; \
12346 struct neon_type_el et
;
12347 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
12348 || rs
== NS_FF
) ? N_VFP
: 0;
12349 /* The instruction versions which take an immediate take one register
12350 argument, which is extended to the width of the full register. Thus the
12351 "source" and "destination" registers must have the same width. Hack that
12352 here by making the size equal to the key (wider, in this case) operand. */
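  /* e.g. (illustrative spelling, not from the original comments) a fixed-point
     form such as "vcvt.f32.s16 s0, s0, #8" names the same S register twice,
     so the wider F32 side is marked with N_KEY below and its 32-bit width is
     the one the type check uses.  */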
12353 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
12355 CVT_VAR (0, N_S32
, N_F32
);
12356 CVT_VAR (1, N_U32
, N_F32
);
12357 CVT_VAR (2, N_F32
, N_S32
);
12358 CVT_VAR (3, N_F32
, N_U32
);
12362 /* VFP instructions. */
12363 CVT_VAR (4, N_F32
, N_F64
);
12364 CVT_VAR (5, N_F64
, N_F32
);
12365 CVT_VAR (6, N_S32
, N_F64
| key
);
12366 CVT_VAR (7, N_U32
, N_F64
| key
);
12367 CVT_VAR (8, N_F64
| key
, N_S32
);
12368 CVT_VAR (9, N_F64
| key
, N_U32
);
12369 /* VFP instructions with bitshift. */
12370 CVT_VAR (10, N_F32
| key
, N_S16
);
12371 CVT_VAR (11, N_F32
| key
, N_U16
);
12372 CVT_VAR (12, N_F64
| key
, N_S16
);
12373 CVT_VAR (13, N_F64
| key
, N_U16
);
12374 CVT_VAR (14, N_S16
, N_F32
| key
);
12375 CVT_VAR (15, N_U16
, N_F32
| key
);
12376 CVT_VAR (16, N_S16
, N_F64
| key
);
12377 CVT_VAR (17, N_U16
, N_F64
| key
);
12383 /* Neon-syntax VFP conversions. */
12386 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
12388 const char *opname
= 0;
12390 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
12392 /* Conversions with immediate bitshift. */
12393 const char *enc
[] =
12415 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
12417 opname
= enc
[flavour
];
12418 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
12419 _("operands 0 and 1 must be the same register"));
12420 inst
.operands
[1] = inst
.operands
[2];
12421 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
12426 /* Conversions without bitshift. */
12427 const char *enc
[] =
12441 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
12442 opname
= enc
[flavour
];
12446 do_vfp_nsyn_opcode (opname
);
12450 do_vfp_nsyn_cvtz (void)
12452 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
12453 int flavour
= neon_cvt_flavour (rs
);
12454 const char *enc
[] =
12466 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
12467 do_vfp_nsyn_opcode (enc
[flavour
]);
12473 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
12474 NS_FD
, NS_DF
, NS_FF
, NS_NULL
);
12475 int flavour
= neon_cvt_flavour (rs
);
12477 /* VFP rather than Neon conversions. */
12480 do_vfp_nsyn_cvt (rs
, flavour
);
12489 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12492 /* Fixed-point conversion with #0 immediate is encoded as an
12493 integer conversion. */
12494 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
12496 unsigned immbits
= 32 - inst
.operands
[2].imm
;
12497 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12498 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12500 inst
.instruction
|= enctab
[flavour
];
12501 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12502 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12503 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12504 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12505 inst
.instruction
|= neon_quad (rs
) << 6;
12506 inst
.instruction
|= 1 << 21;
12507 inst
.instruction
|= immbits
<< 16;
12509 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12517 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
12519 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12521 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12525 inst
.instruction
|= enctab
[flavour
];
12527 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12528 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12529 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12530 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12531 inst
.instruction
|= neon_quad (rs
) << 6;
12532 inst
.instruction
|= 2 << 18;
12534 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12539 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12540 do_vfp_nsyn_cvt (rs
, flavour
);
12545 neon_move_immediate (void)
12547 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
12548 struct neon_type_el et
= neon_check_type (2, rs
,
12549 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
12550 unsigned immlo
, immhi
= 0, immbits
;
12553 constraint (et
.type
== NT_invtype
,
12554 _("operand size must be specified for immediate VMOV"));
12556 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12557 op
= (inst
.instruction
& (1 << 5)) != 0;
12559 immlo
= inst
.operands
[1].imm
;
12560 if (inst
.operands
[1].regisimm
)
12561 immhi
= inst
.operands
[1].reg
;
12563 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
12564 _("immediate has bits set outside the operand size"));
12566 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
12567 et
.size
, et
.type
)) == FAIL
)
12569 /* Invert relevant bits only. */
12570 neon_invert_size (&immlo
, &immhi
, et
.size
);
12571 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12572 with one or the other; those cases are caught by
12573 neon_cmode_for_move_imm. */
12575 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, &immbits
, &op
,
12576 et
.size
, et
.type
)) == FAIL
)
12578 first_error (_("immediate out of range"));
12583 inst
.instruction
&= ~(1 << 5);
12584 inst
.instruction
|= op
<< 5;
12586 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12587 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12588 inst
.instruction
|= neon_quad (rs
) << 6;
12589 inst
.instruction
|= cmode
<< 8;
12591 neon_write_immbits (immbits
);
12597 if (inst
.operands
[1].isreg
)
12599 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12601 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
12602 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12603 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12604 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12605 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12606 inst
.instruction
|= neon_quad (rs
) << 6;
12610 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
12611 neon_move_immediate ();
12614 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12617 /* Encode instructions of form:
12619 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12620 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
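/* An illustrative reading of the layout above (it restates the code that
   follows rather than defining anything new): Rd/D are placed in bits
   12-15/22, Rn/N in bits 16-19/7, Rm/M in bits 0-3/5, the U signedness bit
   in bit 24 and log2 of the element size in the size field at bits 21-20.  */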
12625 neon_mixed_length (struct neon_type_el et, unsigned size)
12627   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12628   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12629   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12630   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12631   inst.instruction |= LOW4 (inst.operands[2].reg);
12632   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12633   inst.instruction |= (et.type == NT_unsigned) << 24;
12634   inst.instruction |= neon_logbits (size) << 20;
12636   inst.instruction = neon_dp_fixup (inst.instruction);
12640 do_neon_dyadic_long (void)
12642   /* FIXME: Type checking for lengthening op.  */
12643   struct neon_type_el et = neon_check_type (3, NS_QDD,
12644     N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12645   neon_mixed_length (et, et.size);
12649 do_neon_abal (void)
12651   struct neon_type_el et = neon_check_type (3, NS_QDD,
12652     N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12653   neon_mixed_length (et, et.size);
12657 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12659   if (inst.operands[2].isscalar)
12661       struct neon_type_el et = neon_check_type (3, NS_QDS,
12662         N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12663       inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12664       neon_mul_mac (et, et.type == NT_unsigned);
12668       struct neon_type_el et = neon_check_type (3, NS_QDD,
12669         N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12670       inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12671       neon_mixed_length (et, et.size);
12676 do_neon_mac_maybe_scalar_long (void)
12678   neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12682 do_neon_dyadic_wide (void)
12684   struct neon_type_el et = neon_check_type (3, NS_QQD,
12685     N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12686   neon_mixed_length (et, et.size);
12690 do_neon_dyadic_narrow (void)
12692   struct neon_type_el et = neon_check_type (3, NS_QDD,
12693     N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12694   /* Operand sign is unimportant, and the U bit is part of the opcode,
12695      so force the operand type to integer.  */
12696   et.type = NT_integer;
12697   neon_mixed_length (et, et.size / 2);
12701 do_neon_mul_sat_scalar_long (void)
12703   neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12707 do_neon_vmull (void)
12709   if (inst.operands[2].isscalar)
12710     do_neon_mac_maybe_scalar_long ();
12713       struct neon_type_el et = neon_check_type (3, NS_QDD,
12714         N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12715       if (et.type == NT_poly)
12716         inst.instruction = NEON_ENC_POLY (inst.instruction);
12718         inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12719       /* For polynomial encoding, size field must be 0b00 and the U bit must be
12720          zero.  Should be OK as-is.  */
12721       neon_mixed_length (et, et.size);
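      /* e.g. "vmull.p8 q0, d1, d2" (an illustrative spelling) takes the
         NEON_ENC_POLY path; since P8 is the only polynomial width accepted
         by the type mask above, the size field stays 0b00 as required.  */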
12728 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
12729 struct neon_type_el et
= neon_check_type (3, rs
,
12730 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
12731 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
12732 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12733 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12734 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12735 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12736 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12737 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12738 inst
.instruction
|= neon_quad (rs
) << 6;
12739 inst
.instruction
|= imm
<< 8;
12741 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12747 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
12748 struct neon_type_el et
= neon_check_type (2, rs
,
12749 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12750 unsigned op
= (inst
.instruction
>> 7) & 3;
12751 /* N (width of reversed regions) is encoded as part of the bitmask. We
12752 extract it here to check the elements to be reversed are smaller.
12753 Otherwise we'd get a reserved instruction. */
12754 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
12755 assert (elsize
!= 0);
12756 constraint (et
.size
>= elsize
,
12757 _("elements must be smaller than reversal region"));
12758 neon_two_same (neon_quad (rs
), 1, et
.size
);
12764 if (inst
.operands
[1].isscalar
)
12766 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
12767 struct neon_type_el et
= neon_check_type (2, rs
,
12768 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
12769 unsigned sizebits
= et
.size
>> 3;
12770 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12771 int logsize
= neon_logbits (et
.size
);
12772 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
12774 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
12777 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
12778 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12779 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12780 inst
.instruction
|= LOW4 (dm
);
12781 inst
.instruction
|= HI1 (dm
) << 5;
12782 inst
.instruction
|= neon_quad (rs
) << 6;
12783 inst
.instruction
|= x
<< 17;
12784 inst
.instruction
|= sizebits
<< 16;
12786 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12790 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
12791 struct neon_type_el et
= neon_check_type (2, rs
,
12792 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12793 /* Duplicate ARM register to lanes of vector. */
12794 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
12797 case 8: inst
.instruction
|= 0x400000; break;
12798 case 16: inst
.instruction
|= 0x000020; break;
12799 case 32: inst
.instruction
|= 0x000000; break;
12802 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
12803 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
12804 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
12805 inst
.instruction
|= neon_quad (rs
) << 21;
12806 /* The encoding for this instruction is identical for the ARM and Thumb
12807 variants, except for the condition field. */
12808 do_vfp_cond_or_thumb ();
12812 /* VMOV has particularly many variations. It can be one of:
12813 0. VMOV<c><q> <Qd>, <Qm>
12814 1. VMOV<c><q> <Dd>, <Dm>
12815 (Register operations, which are VORR with Rm = Rn.)
12816 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12817 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12819 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12820 (ARM register to scalar.)
12821 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12822 (Two ARM registers to vector.)
12823 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12824 (Scalar to ARM register.)
12825 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12826 (Vector to two ARM registers.)
12827 8. VMOV.F32 <Sd>, <Sm>
12828 9. VMOV.F64 <Dd>, <Dm>
12829 (VFP register moves.)
12830 10. VMOV.F32 <Sd>, #imm
12831 11. VMOV.F64 <Dd>, #imm
12832 (VFP float immediate load.)
12833 12. VMOV <Rd>, <Sm>
12834 (VFP single to ARM reg.)
12835 13. VMOV <Sd>, <Rm>
12836 (ARM reg to VFP single.)
12837 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12838 (Two ARM regs to two VFP singles.)
12839 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12840 (Two VFP singles to two ARM regs.)
12842 These cases can be disambiguated using neon_select_shape, except cases 1/9
12843 and 3/11 which depend on the operand type too.
12845 All the encoded bits are hardcoded by this function.
12847 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12848 Cases 5, 7 may be used with VFPv2 and above.
12850 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12851 can specify a type where it doesn't make sense to, and is ignored).
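/* A couple of illustrative mappings (assumed operand spellings, for
   orientation only): "vmov d0, d1" selects NS_DD and is case 1 above,
   encoded via the VORR path, while "vmov.f32 s0, #1.0" selects NS_FI and is
   case 10 (fconsts).  The shape returned by neon_select_shape below picks
   the switch arm.  */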
12857 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
12858 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
12860 struct neon_type_el et
;
12861 const char *ldconst
= 0;
12865 case NS_DD
: /* case 1/9. */
12866 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12867 /* It is not an error here if no type is given. */
12869 if (et
.type
== NT_float
&& et
.size
== 64)
12871 do_vfp_nsyn_opcode ("fcpyd");
12874 /* fall through. */
12876 case NS_QQ
: /* case 0/1. */
12878 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12880 /* The architecture manual I have doesn't explicitly state which
12881 value the U bit should have for register->register moves, but
12882 the equivalent VORR instruction has U = 0, so do that. */
12883 inst
.instruction
= 0x0200110;
12884 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12885 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12886 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12887 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12888 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12889 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12890 inst
.instruction
|= neon_quad (rs
) << 6;
12892 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12896 case NS_DI
: /* case 3/11. */
12897 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
12899 if (et
.type
== NT_float
&& et
.size
== 64)
12901 /* case 11 (fconstd). */
12902 ldconst
= "fconstd";
12903 goto encode_fconstd
;
12905 /* fall through. */
12907 case NS_QI
: /* case 2/3. */
12908 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
12910 inst
.instruction
= 0x0800010;
12911 neon_move_immediate ();
12912 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12915 case NS_SR
: /* case 4. */
12917 unsigned bcdebits
= 0;
12918 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12919 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
12920 int logsize
= neon_logbits (et
.size
);
12921 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
12922 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
12924 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12926 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12927 && et
.size
!= 32, _(BAD_FPU
));
12928 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12929 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12933 case 8: bcdebits
= 0x8; break;
12934 case 16: bcdebits
= 0x1; break;
12935 case 32: bcdebits
= 0x0; break;
12939 bcdebits
|= x
<< logsize
;
12941 inst
.instruction
= 0xe000b10;
12942 do_vfp_cond_or_thumb ();
12943 inst
.instruction
|= LOW4 (dn
) << 16;
12944 inst
.instruction
|= HI1 (dn
) << 7;
12945 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12946 inst
.instruction
|= (bcdebits
& 3) << 5;
12947 inst
.instruction
|= (bcdebits
>> 2) << 21;
12951 case NS_DRR
: /* case 5 (fmdrr). */
12952 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
12955 inst
.instruction
= 0xc400b10;
12956 do_vfp_cond_or_thumb ();
12957 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
12958 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
12959 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
12960 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12963 case NS_RS
: /* case 6. */
12965 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
12966 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
12967 unsigned logsize
= neon_logbits (et
.size
);
12968 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
12969 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
12970 unsigned abcdebits
= 0;
12972 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
12974 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
12975 && et
.size
!= 32, _(BAD_FPU
));
12976 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
12977 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
12981 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
12982 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
12983 case 32: abcdebits
= 0x00; break;
12987 abcdebits
|= x
<< logsize
;
12988 inst
.instruction
= 0xe100b10;
12989 do_vfp_cond_or_thumb ();
12990 inst
.instruction
|= LOW4 (dn
) << 16;
12991 inst
.instruction
|= HI1 (dn
) << 7;
12992 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12993 inst
.instruction
|= (abcdebits
& 3) << 5;
12994 inst
.instruction
|= (abcdebits
>> 2) << 21;
12998 case NS_RRD
: /* case 7 (fmrrd). */
12999 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
13002 inst
.instruction
= 0xc500b10;
13003 do_vfp_cond_or_thumb ();
13004 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
13005 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13006 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13007 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13010 case NS_FF
: /* case 8 (fcpys). */
13011 do_vfp_nsyn_opcode ("fcpys");
13014 case NS_FI
: /* case 10 (fconsts). */
13015 ldconst
= "fconsts";
13017 if (is_quarter_float (inst
.operands
[1].imm
))
13019 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
13020 do_vfp_nsyn_opcode (ldconst
);
13023 first_error (_("immediate out of range"));
13026 case NS_RF
: /* case 12 (fmrs). */
13027 do_vfp_nsyn_opcode ("fmrs");
13030 case NS_FR
: /* case 13 (fmsr). */
13031 do_vfp_nsyn_opcode ("fmsr");
13034 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13035 (one of which is a list), but we have parsed four. Do some fiddling to
13036 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13038 case NS_RRFF
: /* case 14 (fmrrs). */
13039 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
13040 _("VFP registers must be adjacent"));
13041 inst
.operands
[2].imm
= 2;
13042 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13043 do_vfp_nsyn_opcode ("fmrrs");
13046 case NS_FFRR
: /* case 15 (fmsrr). */
13047 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
13048 _("VFP registers must be adjacent"));
13049 inst
.operands
[1] = inst
.operands
[2];
13050 inst
.operands
[2] = inst
.operands
[3];
13051 inst
.operands
[0].imm
= 2;
13052 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13053 do_vfp_nsyn_opcode ("fmsrr");
13062 do_neon_rshift_round_imm (void)
13064   enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13065   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13066   int imm = inst.operands[2].imm;
13068   /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
13071       inst.operands[2].present = 0;
13076   constraint (imm < 1 || (unsigned)imm > et.size,
13077     _("immediate out of range for shift"));
13078   neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13083 do_neon_movl (void)
13085   struct neon_type_el et = neon_check_type (2, NS_QD,
13086     N_EQK | N_DBL, N_SU_32 | N_KEY);
13087   unsigned sizebits = et.size >> 3;
13088   inst.instruction |= sizebits << 19;
13089   neon_two_same (0, et.type == NT_unsigned, -1);
13095 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13096 struct neon_type_el et
= neon_check_type (2, rs
,
13097 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13098 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13099 neon_two_same (neon_quad (rs
), 1, et
.size
);
13103 do_neon_zip_uzp (void)
13105 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13106 struct neon_type_el et
= neon_check_type (2, rs
,
13107 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13108 if (rs
== NS_DD
&& et
.size
== 32)
13110 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13111 inst
.instruction
= N_MNEM_vtrn
;
13115 neon_two_same (neon_quad (rs
), 1, et
.size
);
13119 do_neon_sat_abs_neg (void)
13121 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13122 struct neon_type_el et
= neon_check_type (2, rs
,
13123 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
13124 neon_two_same (neon_quad (rs
), 1, et
.size
);
13128 do_neon_pair_long (void)
13130 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13131 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
13132   /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
13133 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
13134 neon_two_same (neon_quad (rs
), 1, et
.size
);
13138 do_neon_recip_est (void)
13140 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13141 struct neon_type_el et
= neon_check_type (2, rs
,
13142 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
13143 inst
.instruction
|= (et
.type
== NT_float
) << 8;
13144 neon_two_same (neon_quad (rs
), 1, et
.size
);
13150 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13151 struct neon_type_el et
= neon_check_type (2, rs
,
13152 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
13153 neon_two_same (neon_quad (rs
), 1, et
.size
);
13159 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13160 struct neon_type_el et
= neon_check_type (2, rs
,
13161 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
13162 neon_two_same (neon_quad (rs
), 1, et
.size
);
13168 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13169 struct neon_type_el et
= neon_check_type (2, rs
,
13170 N_EQK
| N_INT
, N_8
| N_KEY
);
13171 neon_two_same (neon_quad (rs
), 1, et
.size
);
13177 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13178 neon_two_same (neon_quad (rs
), 1, -1);
13182 do_neon_tbl_tbx (void)
13184   unsigned listlenbits;
13185   neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13187   if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13189       first_error (_("bad list length for table lookup"));
13193   listlenbits = inst.operands[1].imm - 1;
13194   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13195   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13196   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13197   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13198   inst.instruction |= LOW4 (inst.operands[2].reg);
13199   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13200   inst.instruction |= listlenbits << 8;
13202   inst.instruction = neon_dp_fixup (inst.instruction);
13208 /* P, U and L bits are part of bitmask. */
13209 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
13210 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
13212 if (inst
.operands
[1].issingle
)
13214 do_vfp_nsyn_ldm_stm (is_dbmode
);
13218 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
13219 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13221 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
13222 _("register list must contain at least 1 and at most 16 "
13225 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
13226 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
13227 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
13228 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
13230 inst
.instruction
|= offsetbits
;
13232 do_vfp_cond_or_thumb ();
13236 do_neon_ldr_str (void)
13238 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
13240 if (inst
.operands
[0].issingle
)
13243 do_vfp_nsyn_opcode ("flds");
13245 do_vfp_nsyn_opcode ("fsts");
13250 do_vfp_nsyn_opcode ("fldd");
13252 do_vfp_nsyn_opcode ("fstd");
13256 /* "interleave" version also handles non-interleaving register VLD1/VST1
13260 do_neon_ld_st_interleave (void)
13262 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
13263 N_8
| N_16
| N_32
| N_64
);
13264 unsigned alignbits
= 0;
13266 /* The bits in this table go:
13267 0: register stride of one (0) or two (1)
13268 1,2: register list length, minus one (1, 2, 3, 4).
13269 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13270 We use -1 for invalid entries. */
13271   const int typetable[] =
13273 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13274 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13275 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13276 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
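  /* Rough worked example (assuming the usual list syntax): for
     "vld2.8 {d0,d1}, [r0]" the list length is 2, the stride 1 and <n> is 2,
     so the idx computed later in this function is (1 << 1) | (1 << 3) = 10
     and typetable[10] = 0x8 is written back into bits [11:8].  */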
13280 if (et
.type
== NT_invtype
)
13283 if (inst
.operands
[1].immisalign
)
13284 switch (inst
.operands
[1].imm
>> 8)
13286 case 64: alignbits
= 1; break;
13288 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
13289 goto bad_alignment
;
13293 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
13294 goto bad_alignment
;
13299 first_error (_("bad alignment"));
13303 inst
.instruction
|= alignbits
<< 4;
13304 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13306 /* Bits [4:6] of the immediate in a list specifier encode register stride
13307 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13308 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13309 up the right value for "type" in a table based on this value and the given
13310 list style, then stick it back. */
13311 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
13312 | (((inst
.instruction
>> 8) & 3) << 3);
13314 typebits
= typetable
[idx
];
13316 constraint (typebits
== -1, _("bad list type for instruction"));
13318 inst
.instruction
&= ~0xf00;
13319 inst
.instruction
|= typebits
<< 8;
13322 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13323 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13324 otherwise. The variable arguments are a list of pairs of legal (size, align)
13325 values, terminated with -1. */
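/* For instance, one of the VLD1 callers below passes the pairs (16, 16) and
   (32, 32) terminated by -1, meaning 16-bit elements may use :16 alignment
   and 32-bit elements :32; any other combination is rejected.  */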
13328 neon_alignment_bit (int size
, int align
, int *do_align
, ...)
13331 int result
= FAIL
, thissize
, thisalign
;
13333 if (!inst
.operands
[1].immisalign
)
13339 va_start (ap
, do_align
);
13343 thissize
= va_arg (ap
, int);
13344 if (thissize
== -1)
13346 thisalign
= va_arg (ap
, int);
13348 if (size
== thissize
&& align
== thisalign
)
13351 while (result
!= SUCCESS
);
13355 if (result
== SUCCESS
)
13358 first_error (_("unsupported alignment for instruction"));
13364 do_neon_ld_st_lane (void)
13366 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
13367 int align_good
, do_align
= 0;
13368 int logsize
= neon_logbits (et
.size
);
13369 int align
= inst
.operands
[1].imm
>> 8;
13370 int n
= (inst
.instruction
>> 8) & 3;
13371 int max_el
= 64 / et
.size
;
13373 if (et
.type
== NT_invtype
)
13376 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
13377 _("bad list length"));
13378 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
13379 _("scalar index out of range"));
13380 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
13382 _("stride of 2 unavailable when element size is 8"));
13386 case 0: /* VLD1 / VST1. */
13387 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
13389 if (align_good
== FAIL
)
13393 unsigned alignbits
= 0;
13396 case 16: alignbits
= 0x1; break;
13397 case 32: alignbits
= 0x3; break;
13400 inst
.instruction
|= alignbits
<< 4;
13404 case 1: /* VLD2 / VST2. */
13405 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
13407 if (align_good
== FAIL
)
13410 inst
.instruction
|= 1 << 4;
13413 case 2: /* VLD3 / VST3. */
13414 constraint (inst
.operands
[1].immisalign
,
13415 _("can't use alignment with this instruction"));
13418 case 3: /* VLD4 / VST4. */
13419 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
13420 16, 64, 32, 64, 32, 128, -1);
13421 if (align_good
== FAIL
)
13425 unsigned alignbits
= 0;
13428 case 8: alignbits
= 0x1; break;
13429 case 16: alignbits
= 0x1; break;
13430 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
13433 inst
.instruction
|= alignbits
<< 4;
13440 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13441 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13442 inst
.instruction
|= 1 << (4 + logsize
);
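  /* Illustrative check of the shift above: with 16-bit elements logsize is 1,
     so a doubled register stride sets bit 5; with 32-bit elements logsize is
     2 and bit 6 is set, matching the comment.  */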
13444 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
13445 inst
.instruction
|= logsize
<< 10;
13448 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13451 do_neon_ld_dup (void)
13453 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
13454 int align_good
, do_align
= 0;
13456 if (et
.type
== NT_invtype
)
13459 switch ((inst
.instruction
>> 8) & 3)
13461 case 0: /* VLD1. */
13462 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
13463 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
13464 &do_align
, 16, 16, 32, 32, -1);
13465 if (align_good
== FAIL
)
13467 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
13470 case 2: inst
.instruction
|= 1 << 5; break;
13471 default: first_error (_("bad list length")); return;
13473 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13476 case 1: /* VLD2. */
13477 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
13478 &do_align
, 8, 16, 16, 32, 32, 64, -1);
13479 if (align_good
== FAIL
)
13481 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
13482 _("bad list length"));
13483 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13484 inst
.instruction
|= 1 << 5;
13485 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13488 case 2: /* VLD3. */
13489 constraint (inst
.operands
[1].immisalign
,
13490 _("can't use alignment with this instruction"));
13491 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
13492 _("bad list length"));
13493 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13494 inst
.instruction
|= 1 << 5;
13495 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13498 case 3: /* VLD4. */
13500 int align
= inst
.operands
[1].imm
>> 8;
13501 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
13502 16, 64, 32, 64, 32, 128, -1);
13503 if (align_good
== FAIL
)
13505 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
13506 _("bad list length"));
13507 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
13508 inst
.instruction
|= 1 << 5;
13509 if (et
.size
== 32 && align
== 128)
13510 inst
.instruction
|= 0x3 << 6;
13512 inst
.instruction
|= neon_logbits (et
.size
) << 6;
13519 inst
.instruction
|= do_align
<< 4;
13522 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13523    apart from bits [11:4]).  */
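/* In NEON list syntax the three addressing forms look like (illustrative):
   "vld1.8 {d0}, [r0]"    -- interleave / whole-register form,
   "vld1.8 {d0[]}, [r0]"  -- load to all lanes,
   "vld1.8 {d0[2]}, [r0]" -- single lane,
   and NEON_LANE on the parsed register list distinguishes them below.  */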
13526 do_neon_ldx_stx (void)
13528 switch (NEON_LANE (inst
.operands
[0].imm
))
13530 case NEON_INTERLEAVE_LANES
:
13531 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
13532 do_neon_ld_st_interleave ();
13535 case NEON_ALL_LANES
:
13536 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
13541 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
13542 do_neon_ld_st_lane ();
13545 /* L bit comes from bit mask. */
13546 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13547 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13548 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13550 if (inst
.operands
[1].postind
)
13552 int postreg
= inst
.operands
[1].imm
& 0xf;
13553 constraint (!inst
.operands
[1].immisreg
,
13554 _("post-index must be a register"));
13555 constraint (postreg
== 0xd || postreg
== 0xf,
13556 _("bad register for post-index"));
13557 inst
.instruction
|= postreg
;
13559 else if (inst
.operands
[1].writeback
)
13561 inst
.instruction
|= 0xd;
13564 inst
.instruction
|= 0xf;
13567 inst
.instruction
|= 0xf9000000;
13569 inst
.instruction
|= 0xf4000000;
13573 /* Overall per-instruction processing. */
13575 /* We need to be able to fix up arbitrary expressions in some statements.
13576 This is so that we can handle symbols that are an arbitrary distance from
13577 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13578 which returns part of an address in a form which will be valid for
13579 a data instruction. We do this by pushing the expression into a symbol
13580 in the expr_section, and creating a fix for that. */
13583 fix_new_arm (fragS
* frag
,
13598 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
13602 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
13607 /* Mark whether the fix is to a THUMB instruction, or an ARM
13609 new_fix
->tc_fix_data
= thumb_mode
;
13612 /* Create a frag for an instruction requiring relaxation.  */
13614 output_relax_insn (void)
13620 /* The size of the instruction is unknown, so tie the debug info to the
13621 start of the instruction. */
13622 dwarf2_emit_insn (0);
13624 switch (inst
.reloc
.exp
.X_op
)
13627 sym
= inst
.reloc
.exp
.X_add_symbol
;
13628 offset
= inst
.reloc
.exp
.X_add_number
;
13632 offset
= inst
.reloc
.exp
.X_add_number
;
13635 sym
= make_expr_symbol (&inst
.reloc
.exp
);
13639 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
13640 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
13641 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
13644 /* Write a 32-bit thumb instruction to buf. */
13646 put_thumb32_insn (char * buf, unsigned long insn)
13648   md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13649   md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13653 output_inst (const char * str)
13659       as_bad ("%s -- `%s'", inst.error, str);
13663       output_relax_insn();
13666   if (inst.size == 0)
13669   to = frag_more (inst.size);
13671   if (thumb_mode && (inst.size > THUMB_SIZE))
13673       assert (inst.size == (2 * THUMB_SIZE));
13674       put_thumb32_insn (to, inst.instruction);
13676   else if (inst.size > INSN_SIZE)
13678       assert (inst.size == (2 * INSN_SIZE));
13679       md_number_to_chars (to, inst.instruction, INSN_SIZE);
13680       md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13683     md_number_to_chars (to, inst.instruction, inst.size);
13685   if (inst.reloc.type != BFD_RELOC_UNUSED)
13686     fix_new_arm (frag_now, to - frag_now->fr_literal,
13687                  inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13690   dwarf2_emit_insn (inst.size);
13693 /* Tag values used in struct asm_opcode's tag field. */
13696   OT_unconditional,      /* Instruction cannot be conditionalized.
13697                             The ARM condition field is still 0xE.  */
13698   OT_unconditionalF,     /* Instruction cannot be conditionalized
13699                             and carries 0xF in its ARM condition field.  */
13700   OT_csuffix,            /* Instruction takes a conditional suffix.  */
13701   OT_csuffixF,           /* Some forms of the instruction take a conditional
13702                             suffix, others place 0xF where the condition field
13704   OT_cinfix3,            /* Instruction takes a conditional infix,
13705                             beginning at character index 3.  (In
13706                             unified mode, it becomes a suffix.)  */
13707   OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
13708                             tsts, cmps, cmns, and teqs.  */
13709   OT_cinfix3_legacy,     /* Legacy instruction takes a conditional infix at
13710                             character index 3, even in unified mode.  Used for
13711                             legacy instructions where suffix and infix forms
13712                             may be ambiguous.  */
13713   OT_csuf_or_in3,        /* Instruction takes either a conditional
13714                             suffix or an infix at character index 3.  */
13715   OT_odd_infix_unc,      /* This is the unconditional variant of an
13716                             instruction that takes a conditional infix
13717                             at an unusual position.  In unified mode,
13718                             this variant will accept a suffix.  */
13719   OT_odd_infix_0         /* Values greater than or equal to OT_odd_infix_0
13720                             are the conditional variants of instructions that
13721                             take conditional infixes in unusual positions.
13722                             The infix appears at character index
13723                             (tag - OT_odd_infix_0).  These are not accepted
13724                             in unified mode.  */
13727 /* Subroutine of md_assemble, responsible for looking up the primary
13728 opcode from the mnemonic the user wrote. STR points to the
13729 beginning of the mnemonic.
13731 This is not simply a hash table lookup, because of conditional
13732 variants. Most instructions have conditional variants, which are
13733 expressed with a _conditional affix_ to the mnemonic. If we were
13734 to encode each conditional variant as a literal string in the opcode
13735 table, it would have approximately 20,000 entries.
13737 Most mnemonics take this affix as a suffix, and in unified syntax,
13738 'most' is upgraded to 'all'. However, in the divided syntax, some
13739 instructions take the affix as an infix, notably the s-variants of
13740 the arithmetic instructions. Of those instructions, all but six
13741 have the infix appear after the third character of the mnemonic.
13743 Accordingly, the algorithm for looking up primary opcodes given
13746 1. Look up the identifier in the opcode table.
13747 If we find a match, go to step U.
13749 2. Look up the last two characters of the identifier in the
13750 conditions table. If we find a match, look up the first N-2
13751 characters of the identifier in the opcode table. If we
13752 find a match, go to step CE.
13754 3. Look up the fourth and fifth characters of the identifier in
13755 the conditions table. If we find a match, extract those
13756 characters from the identifier, and look up the remaining
13757 characters in the opcode table. If we find a match, go
13762 U. Examine the tag field of the opcode structure, in case this is
13763 one of the six instructions with its conditional infix in an
13764 unusual place. If it is, the tag tells us where to find the
13765 infix; look it up in the conditions table and set inst.cond
13766 accordingly. Otherwise, this is an unconditional instruction.
13767 Again set inst.cond accordingly. Return the opcode structure.
13769 CE. Examine the tag field to make sure this is an instruction that
13770 should receive a conditional suffix. If it is not, fail.
13771 Otherwise, set inst.cond from the suffix we already looked up,
13772 and return the opcode structure.
13774 CM. Examine the tag field to make sure this is an instruction that
13775 should receive a conditional infix after the third character.
13776 If it is not, fail. Otherwise, undo the edits to the current
13777 line of input and proceed as for case CE. */
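/* A rough example of the lookup order (divided-syntax spelling assumed):
   for "addeqs", step 1 finds nothing, step 2 tries the suffix "qs" and also
   fails, and step 3 strips the condition "eq" found at characters 4-5,
   finds "adds" in the opcode table and proceeds as case CM.  */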
13779 static const struct asm_opcode
*
13780 opcode_lookup (char **str
)
13784 const struct asm_opcode
*opcode
;
13785 const struct asm_cond
*cond
;
13787 bfd_boolean neon_supported
;
13789 neon_supported
= ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
);
13791 /* Scan up to the end of the mnemonic, which must end in white space,
13792 '.' (in unified mode, or for Neon instructions), or end of string. */
13793 for (base
= end
= *str
; *end
!= '\0'; end
++)
13794 if (*end
== ' ' || ((unified_syntax
|| neon_supported
) && *end
== '.'))
13800 /* Handle a possible width suffix and/or Neon type suffix. */
13805 /* The .w and .n suffixes are only valid if the unified syntax is in
13807 if (unified_syntax
&& end
[1] == 'w')
13809 else if (unified_syntax
&& end
[1] == 'n')
13814 inst
.vectype
.elems
= 0;
13816 *str
= end
+ offset
;
13818 if (end
[offset
] == '.')
13820 /* See if we have a Neon type suffix (possible in either unified or
13821 non-unified ARM syntax mode). */
13822 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
13825 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
13831 /* Look for unaffixed or special-case affixed mnemonic. */
13832 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
13836 if (opcode
->tag
< OT_odd_infix_0
)
13838 inst
.cond
= COND_ALWAYS
;
13842 if (unified_syntax
)
13843 as_warn (_("conditional infixes are deprecated in unified syntax"));
13844 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
13845 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13848 inst
.cond
= cond
->value
;
13852 /* Cannot have a conditional suffix on a mnemonic of less than two
13854 if (end
- base
< 3)
13857 /* Look for suffixed mnemonic. */
13859 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13860 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
13861 if (opcode
&& cond
)
13864 switch (opcode
->tag
)
13866 case OT_cinfix3_legacy
:
13867 /* Ignore conditional suffixes matched on infix only mnemonics. */
13871 case OT_cinfix3_deprecated
:
13872 case OT_odd_infix_unc
:
13873 if (!unified_syntax
)
13875 /* else fall through */
13879 case OT_csuf_or_in3
:
13880 inst
.cond
= cond
->value
;
13883 case OT_unconditional
:
13884 case OT_unconditionalF
:
13887 inst
.cond
= cond
->value
;
13891 /* delayed diagnostic */
13892 inst
.error
= BAD_COND
;
13893 inst
.cond
= COND_ALWAYS
;
13902 /* Cannot have a usual-position infix on a mnemonic of less than
13903 six characters (five would be a suffix). */
13904 if (end
- base
< 6)
13907 /* Look for infixed mnemonic in the usual position. */
13909 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
13913 memcpy (save
, affix
, 2);
13914 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
13915 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
13916 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
13917 memcpy (affix
, save
, 2);
13920 && (opcode
->tag
== OT_cinfix3
13921 || opcode
->tag
== OT_cinfix3_deprecated
13922 || opcode
->tag
== OT_csuf_or_in3
13923 || opcode
->tag
== OT_cinfix3_legacy
))
13927 && (opcode
->tag
== OT_cinfix3
13928 || opcode
->tag
== OT_cinfix3_deprecated
))
13929 as_warn (_("conditional infixes are deprecated in unified syntax"));
13931 inst
.cond
= cond
->value
;
13939 md_assemble (char *str
)
13942 const struct asm_opcode
* opcode
;
13944 /* Align the previous label if needed. */
13945 if (last_label_seen
!= NULL
)
13947 symbol_set_frag (last_label_seen
, frag_now
);
13948 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
13949 S_SET_SEGMENT (last_label_seen
, now_seg
);
13952 memset (&inst
, '\0', sizeof (inst
));
13953 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
13955 opcode
= opcode_lookup (&p
);
13958 /* It wasn't an instruction, but it might be a register alias of
13959 the form alias .req reg, or a Neon .dn/.qn directive. */
13960 if (!create_register_alias (str
, p
)
13961 && !create_neon_reg_alias (str
, p
))
13962 as_bad (_("bad instruction `%s'"), str
);
13967 if (opcode
->tag
== OT_cinfix3_deprecated
)
13968 as_warn (_("s suffix on comparison instruction is deprecated"));
13970 /* The value which unconditional instructions should have in place of the
13971 condition field. */
13972 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
13976 arm_feature_set variant
;
13978 variant
= cpu_variant
;
13979 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13980 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
13981 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
13982 /* Check that this instruction is supported for this CPU. */
13983 if (!opcode
->tvariant
13984 || (thumb_mode
== 1
13985 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
13987 as_bad (_("selected processor does not support `%s'"), str
);
13990 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
13991 && opcode
->tencode
!= do_t_branch
)
13993 as_bad (_("Thumb does not support conditional execution"));
13997 /* Check conditional suffixes. */
13998 if (current_it_mask
)
14001 cond
= current_cc
^ ((current_it_mask
>> 4) & 1) ^ 1;
14002 current_it_mask
<<= 1;
14003 current_it_mask
&= 0x1f;
14004 /* The BKPT instruction is unconditional even in an IT block. */
14006 && cond
!= inst
.cond
&& opcode
->tencode
!= do_t_bkpt
)
14008 as_bad (_("incorrect condition in IT block"));
14012 else if (inst
.cond
!= COND_ALWAYS
&& opcode
->tencode
!= do_t_branch
)
14014     as_bad (_("Thumb conditional instruction not in IT block"));
14018 mapping_state (MAP_THUMB
);
14019 inst
.instruction
= opcode
->tvalue
;
14021 if (!parse_operands (p
, opcode
->operands
))
14022 opcode
->tencode ();
14024 /* Clear current_it_mask at the end of an IT block. */
14025 if (current_it_mask
== 0x10)
14026 current_it_mask
= 0;
14028 if (!(inst
.error
|| inst
.relax
))
14030 assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
14031 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
14032 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
14034 as_bad (_("cannot honor width suffix -- `%s'"), str
);
14038 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
14039 *opcode
->tvariant
);
14040 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
14041 set those bits when Thumb-2 32-bit instructions are seen, i.e.
14042 anything other than bl/blx.
14043 This is overly pessimistic for relaxable instructions. */
14044 if ((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
14046 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
14049 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
14051 /* Check that this instruction is supported for this CPU. */
14052 if (!opcode
->avariant
||
14053 !ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
))
14055 as_bad (_("selected processor does not support `%s'"), str
);
14060 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
14064 mapping_state (MAP_ARM
);
14065 inst
.instruction
= opcode
->avalue
;
14066 if (opcode
->tag
== OT_unconditionalF
)
14067 inst
.instruction
|= 0xF << 28;
14069 inst
.instruction
|= inst
.cond
<< 28;
14070 inst
.size
= INSN_SIZE
;
14071 if (!parse_operands (p
, opcode
->operands
))
14072 opcode
->aencode ();
14073 /* Arm mode bx is marked as both v4T and v5 because it's still required
14074 on a hypothetical non-thumb v5 core. */
14075 if (ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v4t
)
14076 || ARM_CPU_HAS_FEATURE (*opcode
->avariant
, arm_ext_v5
))
14077 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
14079 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
14080 *opcode
->avariant
);
14084 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14091 /* Various frobbings of labels and their addresses. */
14094 arm_start_line_hook (void)
14096 last_label_seen
= NULL
;
14100 arm_frob_label (symbolS
* sym
)
14102 last_label_seen
= sym
;
14104 ARM_SET_THUMB (sym
, thumb_mode
);
14106 #if defined OBJ_COFF || defined OBJ_ELF
14107 ARM_SET_INTERWORK (sym
, support_interwork
);
14110 /* Note - do not allow local symbols (.Lxxx) to be labeled
14111 as Thumb functions. This is because these labels, whilst
14112 they exist inside Thumb code, are not the entry points for
14113 possible ARM->Thumb calls. Also, these labels can be used
14114 as part of a computed goto or switch statement. eg gcc
14115 can generate code that looks like this:
14117 ldr r2, [pc, .Laaa]
14127 The first instruction loads the address of the jump table.
14128 The second instruction converts a table index into a byte offset.
14129 The third instruction gets the jump address out of the table.
14130 The fourth instruction performs the jump.
14132 If the address stored at .Laaa is that of a symbol which has the
14133 Thumb_Func bit set, then the linker will arrange for this address
14134 to have the bottom bit set, which in turn would mean that the
14135 address computation performed by the third instruction would end
14136 up with the bottom bit set. Since the ARM is capable of unaligned
14137 word loads, the instruction would then load the incorrect address
14138 out of the jump table, and chaos would ensue. */
14139 if (label_is_thumb_function_name
14140 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
14141 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
14143 /* When the address of a Thumb function is taken the bottom
14144 bit of that address should be set. This will allow
14145 interworking between Arm and Thumb functions to work
14148 THUMB_SET_FUNC (sym
, 1);
14150 label_is_thumb_function_name
= FALSE
;
14153 dwarf2_emit_label (sym
);
14157 arm_data_in_code (void)
14159 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
14161 *input_line_pointer
= '/';
14162 input_line_pointer
+= 5;
14163 *input_line_pointer
= 0;
14171 arm_canonicalize_symbol_name (char * name
)
14175 if (thumb_mode
&& (len
= strlen (name
)) > 5
14176 && streq (name
+ len
- 5, "/data"))
14177 *(name
+ len
- 5) = 0;
14182 /* Table of all register names defined by default. The user can
14183 define additional names with .req. Note that all register names
14184 should appear in both upper and lowercase variants. Some registers
14185 also have mixed-case names. */
14187 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14188 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14189 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14190 #define REGSET(p,t) \
14191 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14192 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14193 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14194 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14195 #define REGSETH(p,t) \
14196 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14197 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14198 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14199 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14200 #define REGSET2(p,t) \
14201 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14202 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14203 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14204 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
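/* For example, REGSET(r, RN) below expands to the sixteen entries
   REGDEF(r0,0,RN) ... REGDEF(r15,15,RN), and REGSET2(q, NQ) maps q0-q15 to
   the even D-register numbers 0,2,...,30 via the "2 * n" in REGNUM2.  */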
14206 static const struct reg_entry reg_names[] =
14208 /* ARM integer registers. */
14209 REGSET(r
, RN
), REGSET(R
, RN
),
14211 /* ATPCS synonyms. */
14212 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
14213 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
14214 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
14216 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
14217 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
14218 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
14220 /* Well-known aliases. */
14221 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
14222 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
14224 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
14225 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
14227 /* Coprocessor numbers. */
14228 REGSET(p
, CP
), REGSET(P
, CP
),
14230 /* Coprocessor register numbers. The "cr" variants are for backward
14232 REGSET(c
, CN
), REGSET(C
, CN
),
14233 REGSET(cr
, CN
), REGSET(CR
, CN
),
14235 /* FPA registers. */
14236 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
14237 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
14239 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
14240 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
14242 /* VFP SP registers. */
14243 REGSET(s
,VFS
), REGSET(S
,VFS
),
14244 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
14246 /* VFP DP Registers. */
14247 REGSET(d
,VFD
), REGSET(D
,VFD
),
14248 /* Extra Neon DP registers. */
14249 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
14251 /* Neon QP registers. */
14252 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
14254 /* VFP control registers. */
14255 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
14256 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
14258 /* Maverick DSP coprocessor registers. */
14259 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
14260 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
14262 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
14263 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
14264 REGDEF(dspsc
,0,DSPSC
),
14266 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
14267 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
14268 REGDEF(DSPSC
,0,DSPSC
),
14270 /* iWMMXt data registers - p0, c0-15. */
14271 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
14273 /* iWMMXt control registers - p1, c0-3. */
14274 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
14275 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
14276 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
14277 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
14279 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14280 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
14281 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
14282 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
14283 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
14285 /* XScale accumulator registers. */
14286 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
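/* Usage sketch (not part of the original source): any name in the table
   above can be written wherever a register of the matching type is
   expected, and users may create further aliases with .req.  For example,
   in ARM code:

	mov	ip, sp			@ "ip" and "sp" are table aliases for r12/r13
	ldr	a1, [fp, #-4]		@ ATPCS names a1/fp for r0/r11

	count	.req	r4		@ user alias, same REG_TYPE_RN class
	add	count, count, #1
	.unreq	count
*/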
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},

  /* Individual flags.  */
  {"f",    PSR_f},
  {"s",    PSR_s},
  {"x",    PSR_x},
  {"c",    PSR_c},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
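/* Usage sketch (not part of the original source): these suffixes select
   which PSR fields an msr instruction writes; the flag letters may be
   combined in any order, which is why every permutation appears above.
   All of the following are accepted, and the first two are equivalent:

	msr	CPSR_fc, r0
	msr	CPSR_cf, r0
	msr	SPSR_fsxc, r1		@ write every field of the banked SPSR
*/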
14370 /* Table of V7M psr names. */
static const struct asm_psr v7m_psrs[] =
14384 {"basepri_max", 18},
14389 /* Table of all shift-in-operand names. */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
};
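/* Usage sketch (not part of the original source): a shift name from the
   table above follows the final register operand of a data-processing
   instruction, with either an immediate or a register amount:

	add	r0, r1, r2, lsl #3
	mov	r0, r1, ror r3
	movs	r0, r1, rrx		@ RRX never takes a shift amount
*/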
14400 /* Table of all explicit relocation names. */
static struct reloc_entry reloc_names[] =
{
  { "got",      BFD_RELOC_ARM_GOT32    },  { "GOT",      BFD_RELOC_ARM_GOT32    },
  { "gotoff",   BFD_RELOC_ARM_GOTOFF   },  { "GOTOFF",   BFD_RELOC_ARM_GOTOFF   },
  { "plt",      BFD_RELOC_ARM_PLT32    },  { "PLT",      BFD_RELOC_ARM_PLT32    },
  { "target1",  BFD_RELOC_ARM_TARGET1  },  { "TARGET1",  BFD_RELOC_ARM_TARGET1  },
  { "target2",  BFD_RELOC_ARM_TARGET2  },  { "TARGET2",  BFD_RELOC_ARM_TARGET2  },
  { "sbrel",    BFD_RELOC_ARM_SBREL32  },  { "SBREL",    BFD_RELOC_ARM_SBREL32  },
  { "tlsgd",    BFD_RELOC_ARM_TLS_GD32 },  { "TLSGD",    BFD_RELOC_ARM_TLS_GD32 },
  { "tlsldm",   BFD_RELOC_ARM_TLS_LDM32},  { "TLSLDM",   BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",   BFD_RELOC_ARM_TLS_LDO32},  { "TLSLDO",   BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff", BFD_RELOC_ARM_TLS_IE32 },  { "GOTTPOFF", BFD_RELOC_ARM_TLS_IE32 },
  { "tpoff",    BFD_RELOC_ARM_TLS_LE32 },  { "TPOFF",    BFD_RELOC_ARM_TLS_LE32 }
};
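/* Usage sketch (not part of the original source): an explicit relocation
   name is written in parentheses after the symbol it qualifies, for
   example when emitting position-independent or TLS offsets by hand:

	.word	extern_func(GOT)
	.word	local_sym(GOTOFF)
	.word	tls_var(tpoff)
*/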
14418 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
static const struct asm_cond conds[] =
14423 {"cs", 0x2}, {"hs", 0x2},
14424 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
static struct asm_barrier_opt barrier_opt_names[] =
14446 /* Table of ARM-format instructions. */
14448 /* Macros for gluing together operand strings. N.B. In all cases
14449 other than OPS0, the trailing OP_stop comes from default
14450 zero-initialization of the unspecified elements of the array. */
14451 #define OPS0() { OP_stop, }
14452 #define OPS1(a) { OP_##a, }
14453 #define OPS2(a,b) { OP_##a,OP_##b, }
14454 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14455 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14456 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14457 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14459 /* These macros abstract out the exact format of the mnemonic table and
14460 save some repeated characters. */
14462 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14463 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14464 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14465 THUMB_VARIANT, do_##ae, do_##te }
14467 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14468 a T_MNEM_xyz enumerator. */
14469 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14470 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14471 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14472 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
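/* Illustrative sketch (not part of the original source): written out, a
   table line such as TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi)
   expands to a single asm_opcode initializer along the lines below; the
   two variant fields are whatever ARM_VARIANT and THUMB_VARIANT are
   #defined to at that point in the table.  */
#if 0	/* expansion sketch only, never compiled */
  { "swi", { OP_EXPi, }, OT_csuffix, 0xf000000, 0xdf00,
    ARM_VARIANT, THUMB_VARIANT, do_swi, do_t_swi },
#endif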
14474 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14475 infix after the third character. */
14476 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14477 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14478 THUMB_VARIANT, do_##ae, do_##te }
14479 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
14480 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
14481 THUMB_VARIANT, do_##ae, do_##te }
14482 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14483 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14484 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
14485 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
14486 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14487 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14488 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
14489 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14491 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14492 appear in the condition table. */
14493 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14494 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14495 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14497 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14498 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14499 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14500 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14501 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14502 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14503 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14504 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14505 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14506 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14507 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14508 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14509 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14510 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14511 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14512 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14513 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14514 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14515 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14516 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
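/* Illustrative note (not part of the original source): TxCM therefore
   generates nineteen entries per mnemonic - the bare form plus one for
   each condition infix - so a single tCM(ld,sh, ...) use further down
   yields the strings "ldsh", "ldeqsh", "ldnesh", and so on, all sharing
   the same opcode and encoder functions.  */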
14518 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14519 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14520 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14521 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14523 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14524 field is still 0xE. Many of the Thumb variants can be executed
14525 conditionally, so this is checked separately. */
14526 #define TUE(mnem, op, top, nops, ops, ae, te) \
14527 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14528 THUMB_VARIANT, do_##ae, do_##te }
14530 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14531 condition code field. */
14532 #define TUF(mnem, op, top, nops, ops, ae, te) \
14533 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14534 THUMB_VARIANT, do_##ae, do_##te }
14536 /* ARM-only variants of all the above. */
14537 #define CE(mnem, op, nops, ops, ae) \
14538 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14540 #define C3(mnem, op, nops, ops, ae) \
14541 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* Legacy mnemonics that always have conditional infix after the third
   character.  */
14545 #define CL(mnem, op, nops, ops, ae) \
14546 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14547 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14549 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14550 #define cCE(mnem, op, nops, ops, ae) \
14551 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14553 /* Legacy coprocessor instructions where conditional infix and conditional
14554 suffix are ambiguous. For consistency this includes all FPA instructions,
14555 not just the potentially ambiguous ones. */
14556 #define cCL(mnem, op, nops, ops, ae) \
14557 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14558 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14560 /* Coprocessor, takes either a suffix or a position-3 infix
14561 (for an FPA corner case). */
14562 #define C3E(mnem, op, nops, ops, ae) \
14563 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14564 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14566 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14567 { #m1 #m2 #m3, OPS##nops ops, \
14568 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14569 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14571 #define CM(m1, m2, op, nops, ops, ae) \
14572 xCM_(m1, , m2, op, nops, ops, ae), \
14573 xCM_(m1, eq, m2, op, nops, ops, ae), \
14574 xCM_(m1, ne, m2, op, nops, ops, ae), \
14575 xCM_(m1, cs, m2, op, nops, ops, ae), \
14576 xCM_(m1, hs, m2, op, nops, ops, ae), \
14577 xCM_(m1, cc, m2, op, nops, ops, ae), \
14578 xCM_(m1, ul, m2, op, nops, ops, ae), \
14579 xCM_(m1, lo, m2, op, nops, ops, ae), \
14580 xCM_(m1, mi, m2, op, nops, ops, ae), \
14581 xCM_(m1, pl, m2, op, nops, ops, ae), \
14582 xCM_(m1, vs, m2, op, nops, ops, ae), \
14583 xCM_(m1, vc, m2, op, nops, ops, ae), \
14584 xCM_(m1, hi, m2, op, nops, ops, ae), \
14585 xCM_(m1, ls, m2, op, nops, ops, ae), \
14586 xCM_(m1, ge, m2, op, nops, ops, ae), \
14587 xCM_(m1, lt, m2, op, nops, ops, ae), \
14588 xCM_(m1, gt, m2, op, nops, ops, ae), \
14589 xCM_(m1, le, m2, op, nops, ops, ae), \
14590 xCM_(m1, al, m2, op, nops, ops, ae)
14592 #define UE(mnem, op, nops, ops, ae) \
14593 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14595 #define UF(mnem, op, nops, ops, ae) \
14596 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14598 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14599 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14600 use the same encoding function for each. */
14601 #define NUF(mnem, op, nops, ops, enc) \
14602 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14603 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14605 /* Neon data processing, version which indirects through neon_enc_tab for
14606 the various overloaded versions of opcodes. */
14607 #define nUF(mnem, op, nops, ops, enc) \
14608 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14609 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
14613 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14614 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14615 THUMB_VARIANT, do_##enc, do_##enc }
14617 #define NCE(mnem, op, nops, ops, enc) \
14618 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14620 #define NCEF(mnem, op, nops, ops, enc) \
14621 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14623 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14624 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14625 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14626 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14628 #define nCE(mnem, op, nops, ops, enc) \
14629 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14631 #define nCEF(mnem, op, nops, ops, enc) \
14632 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14636 /* Thumb-only, unconditional. */
14637 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
static const struct asm_opcode insns[] =
{
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
#define THUMB_VARIANT &arm_ext_v4t
  tCE(and,   0000000, and,   3, (RR, oRR, SH),  arit, t_arit3c),
  tC3(ands,  0100000, ands,  3, (RR, oRR, SH),  arit, t_arit3c),
  tCE(eor,   0200000, eor,   3, (RR, oRR, SH),  arit, t_arit3c),
  tC3(eors,  0300000, eors,  3, (RR, oRR, SH),  arit, t_arit3c),
  tCE(sub,   0400000, sub,   3, (RR, oRR, SH),  arit, t_add_sub),
  tC3(subs,  0500000, subs,  3, (RR, oRR, SH),  arit, t_add_sub),
  tCE(add,   0800000, add,   3, (RR, oRR, SHG), arit, t_add_sub),
  tC3(adds,  0900000, adds,  3, (RR, oRR, SHG), arit, t_add_sub),
  tCE(adc,   0a00000, adc,   3, (RR, oRR, SH),  arit, t_arit3c),
  tC3(adcs,  0b00000, adcs,  3, (RR, oRR, SH),  arit, t_arit3c),
  tCE(sbc,   0c00000, sbc,   3, (RR, oRR, SH),  arit, t_arit3),
  tC3(sbcs,  0d00000, sbcs,  3, (RR, oRR, SH),  arit, t_arit3),
  tCE(orr,   1800000, orr,   3, (RR, oRR, SH),  arit, t_arit3c),
  tC3(orrs,  1900000, orrs,  3, (RR, oRR, SH),  arit, t_arit3c),
  tCE(bic,   1c00000, bic,   3, (RR, oRR, SH),  arit, t_arit3),
  tC3(bics,  1d00000, bics,  3, (RR, oRR, SH),  arit, t_arit3),

  /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
     for setting PSR flag bits.  They are obsolete in V6 and do not
     have Thumb equivalents.  */
  tCE(tst,   1100000, tst,   2, (RR, SH), cmp, t_mvn_tst),
  tC3w(tsts, 1100000, tst,   2, (RR, SH), cmp, t_mvn_tst),
  CL(tstp,   110f000,        2, (RR, SH), cmp),
  tCE(cmp,   1500000, cmp,   2, (RR, SH), cmp, t_mov_cmp),
  tC3w(cmps, 1500000, cmp,   2, (RR, SH), cmp, t_mov_cmp),
  CL(cmpp,   150f000,        2, (RR, SH), cmp),
  tCE(cmn,   1700000, cmn,   2, (RR, SH), cmp, t_mvn_tst),
  tC3w(cmns, 1700000, cmn,   2, (RR, SH), cmp, t_mvn_tst),
  CL(cmnp,   170f000,        2, (RR, SH), cmp),

  tCE(mov,   1a00000, mov,   2, (RR, SH), mov, t_mov_cmp),
  tC3(movs,  1b00000, movs,  2, (RR, SH), mov, t_mov_cmp),
  tCE(mvn,   1e00000, mvn,   2, (RR, SH), mov, t_mvn_tst),
  tC3(mvns,  1f00000, mvns,  2, (RR, SH), mov, t_mvn_tst),

  tCE(ldr,   4100000, ldr,   2, (RR, ADDRGLDR), ldst, t_ldst),
  tC3(ldrb,  4500000, ldrb,  2, (RR, ADDRGLDR), ldst, t_ldst),
  tCE(str,   4000000, str,   2, (RR, ADDRGLDR), ldst, t_ldst),
  tC3(strb,  4400000, strb,  2, (RR, ADDRGLDR), ldst, t_ldst),

  tCE(stm,   8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  tCE(ldm,   8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),

  TCE(swi,   f000000, df00,     1, (EXPi), swi, t_swi),
  TCE(svc,   f000000, df00,     1, (EXPi), swi, t_swi),
  tCE(b,     a000000, b,        1, (EXPr), branch, t_branch),
  TCE(bl,    b000000, f000f800, 1, (EXPr), bl, t_branch23),

  tCE(adr,   28f0000, adr,   2, (RR, EXP), adr,  t_adr),
  C3(adrl,   28f0000,        2, (RR, EXP), adrl),
  tCE(nop,   1a00000, nop,   1, (oI255c),  nop,  t_nop),

  /* Thumb-compatibility pseudo ops.  */
  tCE(lsl,   1a00000, lsl,   3, (RR, oRR, SH), shift, t_shift),
  tC3(lsls,  1b00000, lsls,  3, (RR, oRR, SH), shift, t_shift),
  tCE(lsr,   1a00020, lsr,   3, (RR, oRR, SH), shift, t_shift),
  tC3(lsrs,  1b00020, lsrs,  3, (RR, oRR, SH), shift, t_shift),
  tCE(asr,   1a00040, asr,   3, (RR, oRR, SH), shift, t_shift),
  tC3(asrs,  1b00040, asrs,  3, (RR, oRR, SH), shift, t_shift),
  tCE(ror,   1a00060, ror,   3, (RR, oRR, SH), shift, t_shift),
  tC3(rors,  1b00060, rors,  3, (RR, oRR, SH), shift, t_shift),
  tCE(neg,   2600000, neg,   2, (RR, RR),      rd_rn, t_neg),
  tC3(negs,  2700000, negs,  2, (RR, RR),      rd_rn, t_neg),
  tCE(push,  92d0000, push,  1, (REGLST),      push_pop, t_push_pop),
  tCE(pop,   8bd0000, pop,   1, (REGLST),      push_pop, t_push_pop),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6
  TCE(cpy,   1a00000, 4600,  2, (RR, RR), rd_rm, t_cpy),

  /* V1 instructions with no Thumb analogue prior to V6T2.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
  TCE(rsb,   0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
  TC3(rsbs,  0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
  TCE(teq,   1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
  TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
  CL(teqp,   130f000,           2, (RR, SH), cmp),

  TC3(ldrt,  4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
  TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
  TC3(strt,  4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
  TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),

  TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

  TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
  TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

  /* V1 instructions with no Thumb analogue at all.  */
  CE(rsc,    0e00000, 3, (RR, oRR, SH), arit),
  C3(rscs,   0f00000, 3, (RR, oRR, SH), arit),

  C3(stmib,  9800000, 2, (RRw, REGLST), ldmstm),
  C3(stmfa,  9800000, 2, (RRw, REGLST), ldmstm),
  C3(stmda,  8000000, 2, (RRw, REGLST), ldmstm),
  C3(stmed,  8000000, 2, (RRw, REGLST), ldmstm),
  C3(ldmib,  9900000, 2, (RRw, REGLST), ldmstm),
  C3(ldmed,  9900000, 2, (RRw, REGLST), ldmstm),
  C3(ldmda,  8100000, 2, (RRw, REGLST), ldmstm),
  C3(ldmfa,  8100000, 2, (RRw, REGLST), ldmstm),
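/* Usage sketch (not part of the original source): the OT_csuffix and
   OT_cinfix3 tags above mean each mnemonic accepts a condition from the
   conds[] table, either as a suffix or, for the older infix spellings,
   after the third character:

	addeq	r0, r0, #1		@ conditional suffix on "add"
	ldmnefd	sp!, {r4-r7, pc}	@ condition infixed after "ldm"
*/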
#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v4t
  tCE(mul,   0000090, mul,  3, (RRnpc, RRnpc, oRR), mul, t_mul),
  tC3(muls,  0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
  TCE(mla,   0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
  C3(mlas,   0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),

  /* Generic coprocessor instructions.  */
  TCE(cdp,   e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
  TCE(ldc,   c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TC3(ldcl,  c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TCE(stc,   c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TC3(stcl,  c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TCE(mcr,   e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
  TCE(mrc,   e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
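/* Usage sketch (not part of the original source): the generic coprocessor
   entries above take a coprocessor number (RCP), coprocessor registers
   (RCN) and small immediates, e.g. a CP15 control-register read and write
   on implementations that provide one:

	mrc	p15, 0, r0, c1, c0, 0
	mcr	p15, 0, r0, c1, c0, 0
*/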
#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions.  */
  CE(swp,    1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
  C3(swpb,   1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),

#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions.  */
  TCE(mrs,   10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
  TCE(msr,   120f000, f3808000, 2, (RVC_PSR, RR_EXi),  msr, t_msr),

#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies.  */
  TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM(smull,s, 0d00090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
  TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM(umull,s, 0900090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
  TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM(smlal,s, 0f00090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
  TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM(umlal,s, 0b00090,          4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
#define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v4t
  tC3(ldrh,  01000b0, ldrh,  2, (RR, ADDRGLDRS), ldstv4, t_ldst),
  tC3(strh,  00000b0, strh,  2, (RR, ADDRGLDRS), ldstv4, t_ldst),
  tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
  tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
  tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
  tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),

#define ARM_VARIANT &arm_ext_v4t_5
  /* ARM Architecture 4T.  */
  /* Note: bx (and blx) are required on V5, even if the processor does
     not support Thumb.  */
  TCE(bx,    12fff10, 4700, 1, (RR), bx, t_bx),
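/* Usage sketch (not part of the original source): bx is what makes the
   ARM/Thumb interworking handled in the label code earlier work at run
   time - branching to an address whose bottom bit is set enters Thumb
   state, so a state-correct return from either instruction set is simply:

	bx	lr
*/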
#define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v5t
  /* Note: blx has 2 variants; the .value coded here is for
     BLX(2).  Only this variant has conditional execution.  */
  TCE(blx,   12fff30, 4780, 1, (RR_EXr),  blx,  t_blx),
  TUE(bkpt,  1200070, be00, 1, (oIffffb), bkpt, t_bkpt),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
  TCE(clz,   16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
  TUF(ldc2,  c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TUF(stc2,  c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
  TUF(cdp2,  e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
  TUF(mcr2,  e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
  TUF(mrc2,  e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
#define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP.  */
  TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
  TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
  TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
  TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

  TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
  TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

  TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
  TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
  TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
  TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),

  TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
  TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
  TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
  TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

  TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
  TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

  TCE(qadd,  1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
  TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
  TCE(qsub,  1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
  TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE.  */
  TUF(pld,   450f000, f810f000, 1, (ADDR), pld, t_pld),
  TC3(ldrd,  00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
  TC3(strd,  00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),

  TCE(mcrr,  c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
  TCE(mrrc,  c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ.  */
  TCE(bxj,   12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14873 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14874 #undef THUMB_VARIANT
14875 #define THUMB_VARIANT &arm_ext_v6
14876 TUF(cpsie
, 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14877 TUF(cpsid
, 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
14878 tCE(rev
, 6bf0f30
, rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14879 tCE(rev16
, 6bf0fb0
, rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14880 tCE(revsh
, 6ff0fb0
, revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
14881 tCE(sxth
, 6bf0070
, sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14882 tCE(uxth
, 6ff0070
, uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14883 tCE(sxtb
, 6af0070
, sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14884 tCE(uxtb
, 6ef0070
, uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14885 TUF(setend
, 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
14887 #undef THUMB_VARIANT
14888 #define THUMB_VARIANT &arm_ext_v6t2
14889 TCE(ldrex
, 1900f9f
, e8500f00
, 2, (RRnpc
, ADDR
), ldrex
, t_ldrex
),
14890 TUF(mcrr2
, c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14891 TUF(mrrc2
, c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
14893 TCE(ssat
, 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
14894 TCE(usat
, 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
14896 /* ARM V6 not included in V7M (eg. integer SIMD). */
14897 #undef THUMB_VARIANT
14898 #define THUMB_VARIANT &arm_ext_v6_notm
14899 TUF(cps
, 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
14900 TCE(pkhbt
, 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
14901 TCE(pkhtb
, 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
14902 TCE(qadd16
, 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14903 TCE(qadd8
, 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14904 TCE(qaddsubx
, 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14905 TCE(qsub16
, 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14906 TCE(qsub8
, 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14907 TCE(qsubaddx
, 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14908 TCE(sadd16
, 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14909 TCE(sadd8
, 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14910 TCE(saddsubx
, 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14911 TCE(shadd16
, 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14912 TCE(shadd8
, 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14913 TCE(shaddsubx
, 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14914 TCE(shsub16
, 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14915 TCE(shsub8
, 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14916 TCE(shsubaddx
, 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14917 TCE(ssub16
, 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14918 TCE(ssub8
, 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14919 TCE(ssubaddx
, 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14920 TCE(uadd16
, 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14921 TCE(uadd8
, 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14922 TCE(uaddsubx
, 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14923 TCE(uhadd16
, 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14924 TCE(uhadd8
, 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14925 TCE(uhaddsubx
, 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14926 TCE(uhsub16
, 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14927 TCE(uhsub8
, 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14928 TCE(uhsubaddx
, 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14929 TCE(uqadd16
, 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14930 TCE(uqadd8
, 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14931 TCE(uqaddsubx
, 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14932 TCE(uqsub16
, 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14933 TCE(uqsub8
, 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14934 TCE(uqsubaddx
, 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14935 TCE(usub16
, 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14936 TCE(usub8
, 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14937 TCE(usubaddx
, 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14938 TUF(rfeia
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14939 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
14940 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
14941 TUF(rfedb
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14942 TUF(rfefd
, 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
14943 UF(rfefa
, 9900a00
, 1, (RRw
), rfe
),
14944 UF(rfeea
, 8100a00
, 1, (RRw
), rfe
),
14945 TUF(rfeed
, 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
14946 TCE(sxtah
, 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14947 TCE(sxtab16
, 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14948 TCE(sxtab
, 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14949 TCE(sxtb16
, 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14950 TCE(uxtah
, 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14951 TCE(uxtab16
, 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14952 TCE(uxtab
, 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
14953 TCE(uxtb16
, 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
14954 TCE(sel
, 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
14955 TCE(smlad
, 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14956 TCE(smladx
, 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14957 TCE(smlald
, 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14958 TCE(smlaldx
, 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14959 TCE(smlsd
, 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14960 TCE(smlsdx
, 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14961 TCE(smlsld
, 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14962 TCE(smlsldx
, 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
14963 TCE(smmla
, 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14964 TCE(smmlar
, 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14965 TCE(smmls
, 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14966 TCE(smmlsr
, 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14967 TCE(smmul
, 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14968 TCE(smmulr
, 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14969 TCE(smuad
, 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14970 TCE(smuadx
, 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14971 TCE(smusd
, 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14972 TCE(smusdx
, 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14973 TUF(srsia
, 8cd0500
, e980c000
, 1, (I31w
), srs
, srs
),
14974 UF(srsib
, 9cd0500
, 1, (I31w
), srs
),
14975 UF(srsda
, 84d0500
, 1, (I31w
), srs
),
14976 TUF(srsdb
, 94d0500
, e800c000
, 1, (I31w
), srs
, srs
),
14977 TCE(ssat16
, 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
14978 TCE(strex
, 1800f90
, e8400000
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, t_strex
),
14979 TCE(umaal
, 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
14980 TCE(usad8
, 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
14981 TCE(usada8
, 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
14982 TCE(usat16
, 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
14985 #define ARM_VARIANT &arm_ext_v6k
14986 #undef THUMB_VARIANT
14987 #define THUMB_VARIANT &arm_ext_v6k
14988 tCE(yield
, 320f001
, yield
, 0, (), noargs
, t_hint
),
14989 tCE(wfe
, 320f002
, wfe
, 0, (), noargs
, t_hint
),
14990 tCE(wfi
, 320f003
, wfi
, 0, (), noargs
, t_hint
),
14991 tCE(sev
, 320f004
, sev
, 0, (), noargs
, t_hint
),
14993 #undef THUMB_VARIANT
14994 #define THUMB_VARIANT &arm_ext_v6_notm
14995 TCE(ldrexd
, 1b00f9f
, e8d0007f
, 3, (RRnpc
, oRRnpc
, RRnpcb
), ldrexd
, t_ldrexd
),
14996 TCE(strexd
, 1a00f90
, e8c00070
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
), strexd
, t_strexd
),
14998 #undef THUMB_VARIANT
14999 #define THUMB_VARIANT &arm_ext_v6t2
15000 TCE(ldrexb
, 1d00f9f
, e8d00f4f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
15001 TCE(ldrexh
, 1f00f9f
, e8d00f5f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
15002 TCE(strexb
, 1c00f90
, e8c00f40
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
15003 TCE(strexh
, 1e00f90
, e8c00f50
, 3, (RRnpc
, RRnpc
, ADDR
), strex
, rm_rd_rn
),
15004 TUF(clrex
, 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
15007 #define ARM_VARIANT &arm_ext_v6z
15008 TCE(smc
, 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
15011 #define ARM_VARIANT &arm_ext_v6t2
15012 TCE(bfc
, 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
15013 TCE(bfi
, 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
15014 TCE(sbfx
, 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
15015 TCE(ubfx
, 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
15017 TCE(mls
, 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
15018 TCE(movw
, 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
15019 TCE(movt
, 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
15020 TCE(rbit
, 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
15022 TC3(ldrht
, 03000b0
, f8300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15023 TC3(ldrsht
, 03000f0
, f9300e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15024 TC3(ldrsbt
, 03000d0
, f9100e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15025 TC3(strht
, 02000b0
, f8200e00
, 2, (RR
, ADDR
), ldsttv4
, t_ldstt
),
15027 UT(cbnz
, b900
, 2, (RR
, EXP
), t_cbz
),
15028 UT(cbz
, b100
, 2, (RR
, EXP
), t_cbz
),
15029 /* ARM does not really have an IT instruction, so always allow it. */
15031 #define ARM_VARIANT &arm_ext_v1
15032 TUE(it
, 0, bf08
, 1, (COND
), it
, t_it
),
15033 TUE(itt
, 0, bf0c
, 1, (COND
), it
, t_it
),
15034 TUE(ite
, 0, bf04
, 1, (COND
), it
, t_it
),
15035 TUE(ittt
, 0, bf0e
, 1, (COND
), it
, t_it
),
15036 TUE(itet
, 0, bf06
, 1, (COND
), it
, t_it
),
15037 TUE(itte
, 0, bf0a
, 1, (COND
), it
, t_it
),
15038 TUE(itee
, 0, bf02
, 1, (COND
), it
, t_it
),
15039 TUE(itttt
, 0, bf0f
, 1, (COND
), it
, t_it
),
15040 TUE(itett
, 0, bf07
, 1, (COND
), it
, t_it
),
15041 TUE(ittet
, 0, bf0b
, 1, (COND
), it
, t_it
),
15042 TUE(iteet
, 0, bf03
, 1, (COND
), it
, t_it
),
15043 TUE(ittte
, 0, bf0d
, 1, (COND
), it
, t_it
),
15044 TUE(itete
, 0, bf05
, 1, (COND
), it
, t_it
),
15045 TUE(ittee
, 0, bf09
, 1, (COND
), it
, t_it
),
15046 TUE(iteee
, 0, bf01
, 1, (COND
), it
, t_it
),
15048 /* Thumb2 only instructions. */
15050 #define ARM_VARIANT NULL
15052 TCE(addw
, 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15053 TCE(subw
, 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
15054 TCE(tbb
, 0, e8d0f000
, 1, (TB
), 0, t_tb
),
15055 TCE(tbh
, 0, e8d0f010
, 1, (TB
), 0, t_tb
),
15057 /* Thumb-2 hardware division instructions (R and M profiles only). */
15058 #undef THUMB_VARIANT
15059 #define THUMB_VARIANT &arm_ext_div
15060 TCE(sdiv
, 0, fb90f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15061 TCE(udiv
, 0, fbb0f0f0
, 3, (RR
, oRR
, RR
), 0, t_div
),
15063 /* ARM V7 instructions. */
15065 #define ARM_VARIANT &arm_ext_v7
15066 #undef THUMB_VARIANT
15067 #define THUMB_VARIANT &arm_ext_v7
15068 TUF(pli
, 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
15069 TCE(dbg
, 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
15070 TUF(dmb
, 57ff050
, f3bf8f50
, 1, (oBARRIER
), barrier
, t_barrier
),
15071 TUF(dsb
, 57ff040
, f3bf8f40
, 1, (oBARRIER
), barrier
, t_barrier
),
15072 TUF(isb
, 57ff060
, f3bf8f60
, 1, (oBARRIER
), barrier
, t_barrier
),
15075 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15076 cCE(wfs
, e200110
, 1, (RR
), rd
),
15077 cCE(rfs
, e300110
, 1, (RR
), rd
),
15078 cCE(wfc
, e400110
, 1, (RR
), rd
),
15079 cCE(rfc
, e500110
, 1, (RR
), rd
),
15081 cCL(ldfs
, c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15082 cCL(ldfd
, c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15083 cCL(ldfe
, c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15084 cCL(ldfp
, c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15086 cCL(stfs
, c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15087 cCL(stfd
, c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15088 cCL(stfe
, c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15089 cCL(stfp
, c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
15091 cCL(mvfs
, e008100
, 2, (RF
, RF_IF
), rd_rm
),
15092 cCL(mvfsp
, e008120
, 2, (RF
, RF_IF
), rd_rm
),
15093 cCL(mvfsm
, e008140
, 2, (RF
, RF_IF
), rd_rm
),
15094 cCL(mvfsz
, e008160
, 2, (RF
, RF_IF
), rd_rm
),
15095 cCL(mvfd
, e008180
, 2, (RF
, RF_IF
), rd_rm
),
15096 cCL(mvfdp
, e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
15097 cCL(mvfdm
, e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
15098 cCL(mvfdz
, e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
15099 cCL(mvfe
, e088100
, 2, (RF
, RF_IF
), rd_rm
),
15100 cCL(mvfep
, e088120
, 2, (RF
, RF_IF
), rd_rm
),
15101 cCL(mvfem
, e088140
, 2, (RF
, RF_IF
), rd_rm
),
15102 cCL(mvfez
, e088160
, 2, (RF
, RF_IF
), rd_rm
),
15104 cCL(mnfs
, e108100
, 2, (RF
, RF_IF
), rd_rm
),
15105 cCL(mnfsp
, e108120
, 2, (RF
, RF_IF
), rd_rm
),
15106 cCL(mnfsm
, e108140
, 2, (RF
, RF_IF
), rd_rm
),
15107 cCL(mnfsz
, e108160
, 2, (RF
, RF_IF
), rd_rm
),
15108 cCL(mnfd
, e108180
, 2, (RF
, RF_IF
), rd_rm
),
15109 cCL(mnfdp
, e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
15110 cCL(mnfdm
, e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
15111 cCL(mnfdz
, e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
15112 cCL(mnfe
, e188100
, 2, (RF
, RF_IF
), rd_rm
),
15113 cCL(mnfep
, e188120
, 2, (RF
, RF_IF
), rd_rm
),
15114 cCL(mnfem
, e188140
, 2, (RF
, RF_IF
), rd_rm
),
15115 cCL(mnfez
, e188160
, 2, (RF
, RF_IF
), rd_rm
),
15117 cCL(abss
, e208100
, 2, (RF
, RF_IF
), rd_rm
),
15118 cCL(abssp
, e208120
, 2, (RF
, RF_IF
), rd_rm
),
15119 cCL(abssm
, e208140
, 2, (RF
, RF_IF
), rd_rm
),
15120 cCL(abssz
, e208160
, 2, (RF
, RF_IF
), rd_rm
),
15121 cCL(absd
, e208180
, 2, (RF
, RF_IF
), rd_rm
),
15122 cCL(absdp
, e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
15123 cCL(absdm
, e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
15124 cCL(absdz
, e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
15125 cCL(abse
, e288100
, 2, (RF
, RF_IF
), rd_rm
),
15126 cCL(absep
, e288120
, 2, (RF
, RF_IF
), rd_rm
),
15127 cCL(absem
, e288140
, 2, (RF
, RF_IF
), rd_rm
),
15128 cCL(absez
, e288160
, 2, (RF
, RF_IF
), rd_rm
),
15130 cCL(rnds
, e308100
, 2, (RF
, RF_IF
), rd_rm
),
15131 cCL(rndsp
, e308120
, 2, (RF
, RF_IF
), rd_rm
),
15132 cCL(rndsm
, e308140
, 2, (RF
, RF_IF
), rd_rm
),
15133 cCL(rndsz
, e308160
, 2, (RF
, RF_IF
), rd_rm
),
15134 cCL(rndd
, e308180
, 2, (RF
, RF_IF
), rd_rm
),
15135 cCL(rnddp
, e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
15136 cCL(rnddm
, e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
15137 cCL(rnddz
, e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
15138 cCL(rnde
, e388100
, 2, (RF
, RF_IF
), rd_rm
),
15139 cCL(rndep
, e388120
, 2, (RF
, RF_IF
), rd_rm
),
15140 cCL(rndem
, e388140
, 2, (RF
, RF_IF
), rd_rm
),
15141 cCL(rndez
, e388160
, 2, (RF
, RF_IF
), rd_rm
),
15143 cCL(sqts
, e408100
, 2, (RF
, RF_IF
), rd_rm
),
15144 cCL(sqtsp
, e408120
, 2, (RF
, RF_IF
), rd_rm
),
15145 cCL(sqtsm
, e408140
, 2, (RF
, RF_IF
), rd_rm
),
15146 cCL(sqtsz
, e408160
, 2, (RF
, RF_IF
), rd_rm
),
15147 cCL(sqtd
, e408180
, 2, (RF
, RF_IF
), rd_rm
),
15148 cCL(sqtdp
, e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
15149 cCL(sqtdm
, e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
15150 cCL(sqtdz
, e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
15151 cCL(sqte
, e488100
, 2, (RF
, RF_IF
), rd_rm
),
15152 cCL(sqtep
, e488120
, 2, (RF
, RF_IF
), rd_rm
),
15153 cCL(sqtem
, e488140
, 2, (RF
, RF_IF
), rd_rm
),
15154 cCL(sqtez
, e488160
, 2, (RF
, RF_IF
), rd_rm
),
15156 cCL(logs
, e508100
, 2, (RF
, RF_IF
), rd_rm
),
15157 cCL(logsp
, e508120
, 2, (RF
, RF_IF
), rd_rm
),
15158 cCL(logsm
, e508140
, 2, (RF
, RF_IF
), rd_rm
),
15159 cCL(logsz
, e508160
, 2, (RF
, RF_IF
), rd_rm
),
15160 cCL(logd
, e508180
, 2, (RF
, RF_IF
), rd_rm
),
15161 cCL(logdp
, e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
15162 cCL(logdm
, e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
15163 cCL(logdz
, e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
15164 cCL(loge
, e588100
, 2, (RF
, RF_IF
), rd_rm
),
15165 cCL(logep
, e588120
, 2, (RF
, RF_IF
), rd_rm
),
15166 cCL(logem
, e588140
, 2, (RF
, RF_IF
), rd_rm
),
15167 cCL(logez
, e588160
, 2, (RF
, RF_IF
), rd_rm
),
15169 cCL(lgns
, e608100
, 2, (RF
, RF_IF
), rd_rm
),
15170 cCL(lgnsp
, e608120
, 2, (RF
, RF_IF
), rd_rm
),
15171 cCL(lgnsm
, e608140
, 2, (RF
, RF_IF
), rd_rm
),
15172 cCL(lgnsz
, e608160
, 2, (RF
, RF_IF
), rd_rm
),
15173 cCL(lgnd
, e608180
, 2, (RF
, RF_IF
), rd_rm
),
15174 cCL(lgndp
, e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
15175 cCL(lgndm
, e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
15176 cCL(lgndz
, e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
15177 cCL(lgne
, e688100
, 2, (RF
, RF_IF
), rd_rm
),
15178 cCL(lgnep
, e688120
, 2, (RF
, RF_IF
), rd_rm
),
15179 cCL(lgnem
, e688140
, 2, (RF
, RF_IF
), rd_rm
),
15180 cCL(lgnez
, e688160
, 2, (RF
, RF_IF
), rd_rm
),
15182 cCL(exps
, e708100
, 2, (RF
, RF_IF
), rd_rm
),
15183 cCL(expsp
, e708120
, 2, (RF
, RF_IF
), rd_rm
),
15184 cCL(expsm
, e708140
, 2, (RF
, RF_IF
), rd_rm
),
15185 cCL(expsz
, e708160
, 2, (RF
, RF_IF
), rd_rm
),
15186 cCL(expd
, e708180
, 2, (RF
, RF_IF
), rd_rm
),
15187 cCL(expdp
, e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
15188 cCL(expdm
, e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
15189 cCL(expdz
, e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
15190 cCL(expe
, e788100
, 2, (RF
, RF_IF
), rd_rm
),
15191 cCL(expep
, e788120
, 2, (RF
, RF_IF
), rd_rm
),
15192 cCL(expem
, e788140
, 2, (RF
, RF_IF
), rd_rm
),
15193 cCL(expdz
, e788160
, 2, (RF
, RF_IF
), rd_rm
),
15195 cCL(sins
, e808100
, 2, (RF
, RF_IF
), rd_rm
),
15196 cCL(sinsp
, e808120
, 2, (RF
, RF_IF
), rd_rm
),
15197 cCL(sinsm
, e808140
, 2, (RF
, RF_IF
), rd_rm
),
15198 cCL(sinsz
, e808160
, 2, (RF
, RF_IF
), rd_rm
),
15199 cCL(sind
, e808180
, 2, (RF
, RF_IF
), rd_rm
),
15200 cCL(sindp
, e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
15201 cCL(sindm
, e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
15202 cCL(sindz
, e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
15203 cCL(sine
, e888100
, 2, (RF
, RF_IF
), rd_rm
),
15204 cCL(sinep
, e888120
, 2, (RF
, RF_IF
), rd_rm
),
15205 cCL(sinem
, e888140
, 2, (RF
, RF_IF
), rd_rm
),
15206 cCL(sinez
, e888160
, 2, (RF
, RF_IF
), rd_rm
),
15208 cCL(coss
, e908100
, 2, (RF
, RF_IF
), rd_rm
),
15209 cCL(cossp
, e908120
, 2, (RF
, RF_IF
), rd_rm
),
15210 cCL(cossm
, e908140
, 2, (RF
, RF_IF
), rd_rm
),
15211 cCL(cossz
, e908160
, 2, (RF
, RF_IF
), rd_rm
),
15212 cCL(cosd
, e908180
, 2, (RF
, RF_IF
), rd_rm
),
15213 cCL(cosdp
, e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
15214 cCL(cosdm
, e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
15215 cCL(cosdz
, e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
15216 cCL(cose
, e988100
, 2, (RF
, RF_IF
), rd_rm
),
15217 cCL(cosep
, e988120
, 2, (RF
, RF_IF
), rd_rm
),
15218 cCL(cosem
, e988140
, 2, (RF
, RF_IF
), rd_rm
),
15219 cCL(cosez
, e988160
, 2, (RF
, RF_IF
), rd_rm
),
15221 cCL(tans
, ea08100
, 2, (RF
, RF_IF
), rd_rm
),
15222 cCL(tansp
, ea08120
, 2, (RF
, RF_IF
), rd_rm
),
15223 cCL(tansm
, ea08140
, 2, (RF
, RF_IF
), rd_rm
),
15224 cCL(tansz
, ea08160
, 2, (RF
, RF_IF
), rd_rm
),
15225 cCL(tand
, ea08180
, 2, (RF
, RF_IF
), rd_rm
),
15226 cCL(tandp
, ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
15227 cCL(tandm
, ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
15228 cCL(tandz
, ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
15229 cCL(tane
, ea88100
, 2, (RF
, RF_IF
), rd_rm
),
15230 cCL(tanep
, ea88120
, 2, (RF
, RF_IF
), rd_rm
),
15231 cCL(tanem
, ea88140
, 2, (RF
, RF_IF
), rd_rm
),
15232 cCL(tanez
, ea88160
, 2, (RF
, RF_IF
), rd_rm
),
15234 cCL(asns
, eb08100
, 2, (RF
, RF_IF
), rd_rm
),
15235 cCL(asnsp
, eb08120
, 2, (RF
, RF_IF
), rd_rm
),
15236 cCL(asnsm
, eb08140
, 2, (RF
, RF_IF
), rd_rm
),
15237 cCL(asnsz
, eb08160
, 2, (RF
, RF_IF
), rd_rm
),
15238 cCL(asnd
, eb08180
, 2, (RF
, RF_IF
), rd_rm
),
15239 cCL(asndp
, eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
15240 cCL(asndm
, eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
15241 cCL(asndz
, eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
15242 cCL(asne
, eb88100
, 2, (RF
, RF_IF
), rd_rm
),
15243 cCL(asnep
, eb88120
, 2, (RF
, RF_IF
), rd_rm
),
15244 cCL(asnem
, eb88140
, 2, (RF
, RF_IF
), rd_rm
),
15245 cCL(asnez
, eb88160
, 2, (RF
, RF_IF
), rd_rm
),
15247 cCL(acss
, ec08100
, 2, (RF
, RF_IF
), rd_rm
),
15248 cCL(acssp
, ec08120
, 2, (RF
, RF_IF
), rd_rm
),
15249 cCL(acssm
, ec08140
, 2, (RF
, RF_IF
), rd_rm
),
15250 cCL(acssz
, ec08160
, 2, (RF
, RF_IF
), rd_rm
),
15251 cCL(acsd
, ec08180
, 2, (RF
, RF_IF
), rd_rm
),
15252 cCL(acsdp
, ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
15253 cCL(acsdm
, ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
15254 cCL(acsdz
, ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
15255 cCL(acse
, ec88100
, 2, (RF
, RF_IF
), rd_rm
),
15256 cCL(acsep
, ec88120
, 2, (RF
, RF_IF
), rd_rm
),
15257 cCL(acsem
, ec88140
, 2, (RF
, RF_IF
), rd_rm
),
15258 cCL(acsez
, ec88160
, 2, (RF
, RF_IF
), rd_rm
),
15260 cCL(atns
, ed08100
, 2, (RF
, RF_IF
), rd_rm
),
15261 cCL(atnsp
, ed08120
, 2, (RF
, RF_IF
), rd_rm
),
15262 cCL(atnsm
, ed08140
, 2, (RF
, RF_IF
), rd_rm
),
15263 cCL(atnsz
, ed08160
, 2, (RF
, RF_IF
), rd_rm
),
15264 cCL(atnd
, ed08180
, 2, (RF
, RF_IF
), rd_rm
),
15265 cCL(atndp
, ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
15266 cCL(atndm
, ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
15267 cCL(atndz
, ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
15268 cCL(atne
, ed88100
, 2, (RF
, RF_IF
), rd_rm
),
15269 cCL(atnep
, ed88120
, 2, (RF
, RF_IF
), rd_rm
),
15270 cCL(atnem
, ed88140
, 2, (RF
, RF_IF
), rd_rm
),
15271 cCL(atnez
, ed88160
, 2, (RF
, RF_IF
), rd_rm
),
15273 cCL(urds
, ee08100
, 2, (RF
, RF_IF
), rd_rm
),
15274 cCL(urdsp
, ee08120
, 2, (RF
, RF_IF
), rd_rm
),
15275 cCL(urdsm
, ee08140
, 2, (RF
, RF_IF
), rd_rm
),
15276 cCL(urdsz
, ee08160
, 2, (RF
, RF_IF
), rd_rm
),
15277 cCL(urdd
, ee08180
, 2, (RF
, RF_IF
), rd_rm
),
15278 cCL(urddp
, ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
15279 cCL(urddm
, ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
15280 cCL(urddz
, ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
15281 cCL(urde
, ee88100
, 2, (RF
, RF_IF
), rd_rm
),
15282 cCL(urdep
, ee88120
, 2, (RF
, RF_IF
), rd_rm
),
15283 cCL(urdem
, ee88140
, 2, (RF
, RF_IF
), rd_rm
),
15284 cCL(urdez
, ee88160
, 2, (RF
, RF_IF
), rd_rm
),
15286 cCL(nrms
, ef08100
, 2, (RF
, RF_IF
), rd_rm
),
15287 cCL(nrmsp
, ef08120
, 2, (RF
, RF_IF
), rd_rm
),
15288 cCL(nrmsm
, ef08140
, 2, (RF
, RF_IF
), rd_rm
),
15289 cCL(nrmsz
, ef08160
, 2, (RF
, RF_IF
), rd_rm
),
15290 cCL(nrmd
, ef08180
, 2, (RF
, RF_IF
), rd_rm
),
15291 cCL(nrmdp
, ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
15292 cCL(nrmdm
, ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
15293 cCL(nrmdz
, ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
15294 cCL(nrme
, ef88100
, 2, (RF
, RF_IF
), rd_rm
),
15295 cCL(nrmep
, ef88120
, 2, (RF
, RF_IF
), rd_rm
),
15296 cCL(nrmem
, ef88140
, 2, (RF
, RF_IF
), rd_rm
),
15297 cCL(nrmez
, ef88160
, 2, (RF
, RF_IF
), rd_rm
),
15299 cCL(adfs
, e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15300 cCL(adfsp
, e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15301 cCL(adfsm
, e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15302 cCL(adfsz
, e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15303 cCL(adfd
, e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15304 cCL(adfdp
, e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15305 cCL(adfdm
, e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15306 cCL(adfdz
, e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15307 cCL(adfe
, e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15308 cCL(adfep
, e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15309 cCL(adfem
, e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15310 cCL(adfez
, e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15312 cCL(sufs
, e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15313 cCL(sufsp
, e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15314 cCL(sufsm
, e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15315 cCL(sufsz
, e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15316 cCL(sufd
, e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15317 cCL(sufdp
, e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15318 cCL(sufdm
, e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15319 cCL(sufdz
, e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15320 cCL(sufe
, e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15321 cCL(sufep
, e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15322 cCL(sufem
, e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15323 cCL(sufez
, e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15325 cCL(rsfs
, e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15326 cCL(rsfsp
, e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15327 cCL(rsfsm
, e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15328 cCL(rsfsz
, e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15329 cCL(rsfd
, e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15330 cCL(rsfdp
, e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15331 cCL(rsfdm
, e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15332 cCL(rsfdz
, e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15333 cCL(rsfe
, e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15334 cCL(rsfep
, e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15335 cCL(rsfem
, e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15336 cCL(rsfez
, e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15338 cCL(mufs
, e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15339 cCL(mufsp
, e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15340 cCL(mufsm
, e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15341 cCL(mufsz
, e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15342 cCL(mufd
, e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15343 cCL(mufdp
, e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15344 cCL(mufdm
, e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15345 cCL(mufdz
, e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15346 cCL(mufe
, e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15347 cCL(mufep
, e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15348 cCL(mufem
, e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15349 cCL(mufez
, e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15351 cCL(dvfs
, e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15352 cCL(dvfsp
, e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15353 cCL(dvfsm
, e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15354 cCL(dvfsz
, e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15355 cCL(dvfd
, e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15356 cCL(dvfdp
, e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15357 cCL(dvfdm
, e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15358 cCL(dvfdz
, e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15359 cCL(dvfe
, e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15360 cCL(dvfep
, e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15361 cCL(dvfem
, e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15362 cCL(dvfez
, e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15364 cCL(rdfs
, e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15365 cCL(rdfsp
, e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15366 cCL(rdfsm
, e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15367 cCL(rdfsz
, e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15368 cCL(rdfd
, e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15369 cCL(rdfdp
, e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15370 cCL(rdfdm
, e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15371 cCL(rdfdz
, e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15372 cCL(rdfe
, e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15373 cCL(rdfep
, e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15374 cCL(rdfem
, e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15375 cCL(rdfez
, e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15377 cCL(pows
, e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15378 cCL(powsp
, e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15379 cCL(powsm
, e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15380 cCL(powsz
, e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15381 cCL(powd
, e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15382 cCL(powdp
, e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15383 cCL(powdm
, e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15384 cCL(powdz
, e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15385 cCL(powe
, e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15386 cCL(powep
, e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15387 cCL(powem
, e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15388 cCL(powez
, e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15390 cCL(rpws
, e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15391 cCL(rpwsp
, e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15392 cCL(rpwsm
, e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15393 cCL(rpwsz
, e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15394 cCL(rpwd
, e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15395 cCL(rpwdp
, e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15396 cCL(rpwdm
, e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15397 cCL(rpwdz
, e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15398 cCL(rpwe
, e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15399 cCL(rpwep
, e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15400 cCL(rpwem
, e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15401 cCL(rpwez
, e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15403 cCL(rmfs
, e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15404 cCL(rmfsp
, e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15405 cCL(rmfsm
, e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15406 cCL(rmfsz
, e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15407 cCL(rmfd
, e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15408 cCL(rmfdp
, e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15409 cCL(rmfdm
, e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15410 cCL(rmfdz
, e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15411 cCL(rmfe
, e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15412 cCL(rmfep
, e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15413 cCL(rmfem
, e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15414 cCL(rmfez
, e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15416 cCL(fmls
, e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15417 cCL(fmlsp
, e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15418 cCL(fmlsm
, e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15419 cCL(fmlsz
, e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15420 cCL(fmld
, e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15421 cCL(fmldp
, e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15422 cCL(fmldm
, e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15423 cCL(fmldz
, e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15424 cCL(fmle
, e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15425 cCL(fmlep
, e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15426 cCL(fmlem
, e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15427 cCL(fmlez
, e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15429 cCL(fdvs
, ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15430 cCL(fdvsp
, ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15431 cCL(fdvsm
, ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15432 cCL(fdvsz
, ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15433 cCL(fdvd
, ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15434 cCL(fdvdp
, ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15435 cCL(fdvdm
, ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15436 cCL(fdvdz
, ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15437 cCL(fdve
, ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15438 cCL(fdvep
, ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15439 cCL(fdvem
, ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15440 cCL(fdvez
, ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15442 cCL(frds
, eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15443 cCL(frdsp
, eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15444 cCL(frdsm
, eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15445 cCL(frdsz
, eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15446 cCL(frdd
, eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15447 cCL(frddp
, eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15448 cCL(frddm
, eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15449 cCL(frddz
, eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15450 cCL(frde
, eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15451 cCL(frdep
, eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15452 cCL(frdem
, eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15453 cCL(frdez
, eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15455 cCL(pols
, ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15456 cCL(polsp
, ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15457 cCL(polsm
, ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15458 cCL(polsz
, ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15459 cCL(pold
, ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15460 cCL(poldp
, ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15461 cCL(poldm
, ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15462 cCL(poldz
, ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15463 cCL(pole
, ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15464 cCL(polep
, ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15465 cCL(polem
, ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15466 cCL(polez
, ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
15468 cCE(cmf
, e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15469 C3E(cmfe
, ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15470 cCE(cnf
, eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15471 C3E(cnfe
, ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
15473 cCL(flts
, e000110
, 2, (RF
, RR
), rn_rd
),
15474 cCL(fltsp
, e000130
, 2, (RF
, RR
), rn_rd
),
15475 cCL(fltsm
, e000150
, 2, (RF
, RR
), rn_rd
),
15476 cCL(fltsz
, e000170
, 2, (RF
, RR
), rn_rd
),
15477 cCL(fltd
, e000190
, 2, (RF
, RR
), rn_rd
),
15478 cCL(fltdp
, e0001b0
, 2, (RF
, RR
), rn_rd
),
15479 cCL(fltdm
, e0001d0
, 2, (RF
, RR
), rn_rd
),
15480 cCL(fltdz
, e0001f0
, 2, (RF
, RR
), rn_rd
),
15481 cCL(flte
, e080110
, 2, (RF
, RR
), rn_rd
),
15482 cCL(fltep
, e080130
, 2, (RF
, RR
), rn_rd
),
15483 cCL(fltem
, e080150
, 2, (RF
, RR
), rn_rd
),
15484 cCL(fltez
, e080170
, 2, (RF
, RR
), rn_rd
),
 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits.  */
 cCE(fix,    e100110, 2, (RR, RF),        rd_rm),
 cCL(fixp,   e100130, 2, (RR, RF),        rd_rm),
 cCL(fixm,   e100150, 2, (RR, RF),        rd_rm),
 cCL(fixz,   e100170, 2, (RR, RF),        rd_rm),
 cCL(fixsp,  e100130, 2, (RR, RF),        rd_rm),
 cCL(fixsm,  e100150, 2, (RR, RF),        rd_rm),
 cCL(fixsz,  e100170, 2, (RR, RF),        rd_rm),
 cCL(fixdp,  e100130, 2, (RR, RF),        rd_rm),
 cCL(fixdm,  e100150, 2, (RR, RF),        rd_rm),
 cCL(fixdz,  e100170, 2, (RR, RF),        rd_rm),
 cCL(fixep,  e100130, 2, (RR, RF),        rd_rm),
 cCL(fixem,  e100150, 2, (RR, RF),        rd_rm),
 cCL(fixez,  e100170, 2, (RR, RF),        rd_rm),
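 /* Illustrative example (not part of the original source): because the
    precision letter sets no bits, the two statements below should produce
    the same encoding; both map onto the e100170 entry above:

	fixz	r0, f1
	fixsz	r0, f1
  */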
 /* Instructions that were new with the real FPA, call them V2.  */
#define ARM_VARIANT &fpu_fpa_ext_v2

 cCE(lfm,    c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd,  c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea,  d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm,    c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd,  d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea,  c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
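 /* Illustrative example (not part of the original source, syntax assumed):
    the FPA register save/restore pair that compilers traditionally
    emitted, using the multiple load/store forms above:

	sfmfd	f4, 4, [sp]!
	lfmfd	f4, 4, [sp]!
  */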
#define ARM_VARIANT &fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */

 /* Moves and type conversions.  */
 cCE(fcpys,    eb00a40, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fmrs,     e100a10, 2, (RR, RVS),        vfp_reg_from_sp),
 cCE(fmsr,     e000a10, 2, (RVS, RR),        vfp_sp_from_reg),
 cCE(fmstat,   ef1fa10, 0, (),               noargs),
 cCE(fsitos,   eb80ac0, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fuitos,   eb80a40, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(ftosis,   ebd0a40, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(ftosizs,  ebd0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(ftouis,   ebc0a40, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(ftouizs,  ebc0ac0, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fmrx,     ef00a10, 2, (RR, RVC),        rd_rn),
 cCE(fmxr,     ee00a10, 2, (RVC, RR),        rn_rd),

 /* Memory operations.  */
 cCE(flds,     d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
 cCE(fsts,     d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
 cCE(fldmias,  c900a00, 2, (RRw, VRSLST),    vfp_sp_ldstmia),
 cCE(fldmfds,  c900a00, 2, (RRw, VRSLST),    vfp_sp_ldstmia),
 cCE(fldmdbs,  d300a00, 2, (RRw, VRSLST),    vfp_sp_ldstmdb),
 cCE(fldmeas,  d300a00, 2, (RRw, VRSLST),    vfp_sp_ldstmdb),
 cCE(fldmiax,  c900b00, 2, (RRw, VRDLST),    vfp_xp_ldstmia),
 cCE(fldmfdx,  c900b00, 2, (RRw, VRDLST),    vfp_xp_ldstmia),
 cCE(fldmdbx,  d300b00, 2, (RRw, VRDLST),    vfp_xp_ldstmdb),
 cCE(fldmeax,  d300b00, 2, (RRw, VRDLST),    vfp_xp_ldstmdb),
 cCE(fstmias,  c800a00, 2, (RRw, VRSLST),    vfp_sp_ldstmia),
 cCE(fstmeas,  c800a00, 2, (RRw, VRSLST),    vfp_sp_ldstmia),
 cCE(fstmdbs,  d200a00, 2, (RRw, VRSLST),    vfp_sp_ldstmdb),
 cCE(fstmfds,  d200a00, 2, (RRw, VRSLST),    vfp_sp_ldstmdb),
 cCE(fstmiax,  c800b00, 2, (RRw, VRDLST),    vfp_xp_ldstmia),
 cCE(fstmeax,  c800b00, 2, (RRw, VRDLST),    vfp_xp_ldstmia),
 cCE(fstmdbx,  d200b00, 2, (RRw, VRDLST),    vfp_xp_ldstmdb),
 cCE(fstmfdx,  d200b00, 2, (RRw, VRDLST),    vfp_xp_ldstmdb),

 /* Monadic operations.  */
 cCE(fabss,    eb00ac0, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fnegs,    eb10a40, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fsqrts,   eb10ac0, 2, (RVS, RVS),       vfp_sp_monadic),

 /* Dyadic operations.  */
 cCE(fadds,    e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fsubs,    e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fmuls,    e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fdivs,    e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fmacs,    e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fmscs,    e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fnmuls,   e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fnmacs,   e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE(fnmscs,   e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),

 cCE(fcmps,    eb40a40, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fcmpzs,   eb50a40, 1, (RVS),            vfp_sp_compare_z),
 cCE(fcmpes,   eb40ac0, 2, (RVS, RVS),       vfp_sp_monadic),
 cCE(fcmpezs,  eb50ac0, 1, (RVS),            vfp_sp_compare_z),
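 /* Illustrative example (not part of the original source): converting a
    single-precision value in memory to a signed integer in a core
    register with the pre-UAL mnemonics above (the "z" form rounds toward
    zero):

	flds	s0, [r0]
	ftosizs	s0, s0
	fmrs	r1, s0
  */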
#define ARM_VARIANT &fpu_vfp_ext_v1  /* VFP V1 (Double precision).  */

 /* Moves and type conversions.  */
 cCE(fcpyd,    eb00b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
 cCE(fcvtds,   eb70ac0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
 cCE(fcvtsd,   eb70bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
 cCE(fmdhr,    e200b10, 2, (RVD, RR),        vfp_dp_rn_rd),
 cCE(fmdlr,    e000b10, 2, (RVD, RR),        vfp_dp_rn_rd),
 cCE(fmrdh,    e300b10, 2, (RR, RVD),        vfp_dp_rd_rn),
 cCE(fmrdl,    e100b10, 2, (RR, RVD),        vfp_dp_rd_rn),
 cCE(fsitod,   eb80bc0, 2, (RVD, RVS),       vfp_dp_sp_cvt),
 cCE(fuitod,   eb80b40, 2, (RVD, RVS),       vfp_dp_sp_cvt),
 cCE(ftosid,   ebd0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
 cCE(ftosizd,  ebd0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),
 cCE(ftouid,   ebc0b40, 2, (RVS, RVD),       vfp_sp_dp_cvt),
 cCE(ftouizd,  ebc0bc0, 2, (RVS, RVD),       vfp_sp_dp_cvt),

 /* Memory operations.  */
 cCE(fldd,     d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
 cCE(fstd,     d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
 cCE(fldmiad,  c900b00, 2, (RRw, VRDLST),    vfp_dp_ldstmia),
 cCE(fldmfdd,  c900b00, 2, (RRw, VRDLST),    vfp_dp_ldstmia),
 cCE(fldmdbd,  d300b00, 2, (RRw, VRDLST),    vfp_dp_ldstmdb),
 cCE(fldmead,  d300b00, 2, (RRw, VRDLST),    vfp_dp_ldstmdb),
 cCE(fstmiad,  c800b00, 2, (RRw, VRDLST),    vfp_dp_ldstmia),
 cCE(fstmead,  c800b00, 2, (RRw, VRDLST),    vfp_dp_ldstmia),
 cCE(fstmdbd,  d200b00, 2, (RRw, VRDLST),    vfp_dp_ldstmdb),
 cCE(fstmfdd,  d200b00, 2, (RRw, VRDLST),    vfp_dp_ldstmdb),

 /* Monadic operations.  */
 cCE(fabsd,    eb00bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
 cCE(fnegd,    eb10b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
 cCE(fsqrtd,   eb10bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),

 /* Dyadic operations.  */
 cCE(faddd,    e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fsubd,    e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fmuld,    e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fdivd,    e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fmacd,    e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fmscd,    e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fnmuld,   e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fnmacd,   e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE(fnmscd,   e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),

 cCE(fcmpd,    eb40b40, 2, (RVD, RVD),       vfp_dp_rd_rm),
 cCE(fcmpzd,   eb50b40, 1, (RVD),            vfp_dp_rd),
 cCE(fcmped,   eb40bc0, 2, (RVD, RVD),       vfp_dp_rd_rm),
 cCE(fcmpezd,  eb50bc0, 1, (RVD),            vfp_dp_rd),
#define ARM_VARIANT &fpu_vfp_ext_v2

 cCE(fmsrr,  c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs,  c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr,  c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
 cCE(fmrrd,  c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
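 /* Illustrative example (not part of the original source): transferring a
    double-precision register to and from a pair of core registers with
    the VFP V2 instructions above:

	fmrrd	r2, r3, d1
	fmdrr	d1, r2, r3
  */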
 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks.  */
#define ARM_VARIANT &fpu_vfp_ext_v1xd
#undef  THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v1xd

 /* These mnemonics are unique to VFP.  */
 NCE(vsqrt,   0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
 NCE(vdiv,    0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
 nCE(vnmul,   vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmla,   vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmls,   vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vcmp,    vcmp,    2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
 nCE(vcmpe,   vcmpe,   2, (RVSD, RVSD_I0),    vfp_nsyn_cmp),
 NCE(vpush,   0,       1, (VRSDLST),          vfp_nsyn_push),
 NCE(vpop,    0,       1, (VRSDLST),          vfp_nsyn_pop),
 NCE(vcvtz,   0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),

 /* Mnemonics shared by Neon and VFP.  */
 nCEF(vmul,   vmul,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla,   vmla,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls,   vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),

 nCEF(vadd,   vadd,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 nCEF(vsub,   vsub,    3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),

 NCEF(vabs,   1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCEF(vneg,   1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),

 NCE(vldm,    c900b00, 2, (RRw, VRSDLST),   neon_ldm_stm),
 NCE(vldmia,  c900b00, 2, (RRw, VRSDLST),   neon_ldm_stm),
 NCE(vldmdb,  d100b00, 2, (RRw, VRSDLST),   neon_ldm_stm),
 NCE(vstm,    c800b00, 2, (RRw, VRSDLST),   neon_ldm_stm),
 NCE(vstmia,  c800b00, 2, (RRw, VRSDLST),   neon_ldm_stm),
 NCE(vstmdb,  d000b00, 2, (RRw, VRSDLST),   neon_ldm_stm),
 NCE(vldr,    d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 NCE(vstr,    d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),

 nCEF(vcvt,   vcvt,    3, (RNSDQ, RNSDQ, oI32b), neon_cvt),

 /* NOTE: All VMOV encoding is special-cased!  */
 NCE(vmov,    0,       1, (VMOV), neon_mov),
 NCE(vmovq,   0,       1, (VMOV), neon_mov),
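 /* Illustrative example (not part of the original source): "vadd" above is
    dispatched through neon_addsub_if_i, so the first line below selects a
    VFP encoding and the second a Neon one; the encoder then checks that
    the selected architecture actually provides it:

	vadd.f32	s0, s1, s2
	vadd.i32	q0, q1, q2
  */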
#undef  THUMB_VARIANT
#define THUMB_VARIANT &fpu_neon_ext_v1
#define ARM_VARIANT &fpu_neon_ext_v1

 /* Data processing with three registers of the same length.  */
 /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
15681 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
15682 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
15683 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15684 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15685 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15686 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15687 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
15688 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
15689 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15690 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15691 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15692 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
15693 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
15694 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
15695 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
15696 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
15697 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
15698 /* If not immediate, fall back to neon_dyadic_i64_su.
15699 shl_imm should accept I8 I16 I32 I64,
15700 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15701 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
15702 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
15703 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
15704 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
15705 /* Logic ops, types optional & ignored. */
15706 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
15707 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
15708 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
15709 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
15710 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
15711 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
15712 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
15713 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
15714 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
15715 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
15716 /* Bitfield ops, untyped. */
15717 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15718 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15719 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15720 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15721 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
15722 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
15723 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15724 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15725 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15726 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15727 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15728 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
15729 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
15730 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15731 back to neon_dyadic_if_su. */
15732 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15733 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15734 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
15735 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
15736 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15737 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15738 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
15739 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
15740 /* Comparison. Type I8 I16 I32 F32. */
15741 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
15742 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
15743 /* As above, D registers only. */
15744 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15745 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
15746 /* Int and float variants, signedness unimportant. */
15747 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15748 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
15749 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
15750 /* Add/sub take types I8 I16 I32 I64 F32. */
15751 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15752 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
15753 /* vtst takes sizes 8, 16, 32. */
15754 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
15755 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
15756 /* VMUL takes I8 I16 I32 F32 P8. */
15757 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
15758 /* VQD{R}MULH takes S16 S32. */
15759 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15760 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15761 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
15762 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
15763 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15764 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15765 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
15766 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
15767 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15768 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15769 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
15770 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
15771 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15772 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15773 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
15774 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
15776 /* Two address, int/float. Types S8 S16 S32 F32. */
15777 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15778 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
15780 /* Data processing with two registers and a shift amount. */
15781 /* Right shifts, and variants with rounding.
15782 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15783 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15784 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15785 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
15786 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
15787 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15788 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15789 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
15790 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
15791 /* Shift and insert. Sizes accepted 8 16 32 64. */
15792 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
15793 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
15794 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
15795 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
15796 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15797 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
15798 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
15799 /* Right shift immediate, saturating & narrowing, with rounding variants.
15800 Types accepted S16 S32 S64 U16 U32 U64. */
15801 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15802 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
15803 /* As above, unsigned. Types accepted S16 S32 S64. */
15804 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15805 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
15806 /* Right shift narrowing. Types accepted I16 I32 I64. */
15807 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15808 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
15809 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15810 nUF(vshll
, vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
15811 /* CVT with optional immediate for fixed-point variant. */
15812 nUF(vcvtq
, vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
15814 nUF(vmvn
, vmvn
, 2, (RNDQ
, RNDQ_IMVNb
), neon_mvn
),
15815 nUF(vmvnq
, vmvn
, 2, (RNQ
, RNDQ_IMVNb
), neon_mvn
),
15817 /* Data processing, three registers of different lengths. */
15818 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15819 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
15820 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15821 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15822 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
15823 /* If not scalar, fall back to neon_dyadic_long.
15824 Vector types as above, scalar types S16 S32 U16 U32. */
15825 nUF(vmlal
, vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15826 nUF(vmlsl
, vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
15827 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15828 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15829 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
15830 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15831 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15832 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15833 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15834 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
15835 /* Saturating doubling multiplies. Types S16 S32. */
15836 nUF(vqdmlal
, vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15837 nUF(vqdmlsl
, vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15838 nUF(vqdmull
, vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
15839 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15840 S16 S32 U16 U32. */
15841 nUF(vmull
, vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
15843 /* Extract. Size 8. */
15844 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I7
), neon_ext
),
15845 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I7
), neon_ext
),
15847 /* Two registers, miscellaneous. */
15848 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15849 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
15850 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
15851 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
15852 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
15853 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
15854 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
15855 /* Vector replicate. Sizes 8 16 32. */
15856 nCE(vdup
, vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
15857 nCE(vdupq
, vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
15858 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15859 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
15860 /* VMOVN. Types I16 I32 I64. */
15861 nUF(vmovn
, vmovn
, 2, (RND
, RNQ
), neon_movn
),
15862 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15863 nUF(vqmovn
, vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
15864 /* VQMOVUN. Types S16 S32 S64. */
15865 nUF(vqmovun
, vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
15866 /* VZIP / VUZP. Sizes 8 16 32. */
15867 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15868 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15869 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
15870 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
15871 /* VQABS / VQNEG. Types S8 S16 S32. */
15872 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15873 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15874 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
15875 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
15876 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15877 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15878 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
15879 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
15880 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
15881 /* Reciprocal estimates. Types U32 F32. */
15882 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15883 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
15884 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
15885 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
15886 /* VCLS. Types S8 S16 S32. */
15887 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
15888 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
15889 /* VCLZ. Types I8 I16 I32. */
15890 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
15891 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
15892 /* VCNT. Size 8. */
15893 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
15894 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
15895 /* Two address, untyped. */
15896 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
15897 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
15898 /* VTRN. Sizes 8 16 32. */
15899 nUF(vtrn
, vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
15900 nUF(vtrnq
, vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
15902 /* Table lookup. Size 8. */
15903 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
15904 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
#undef  THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext

 /* Neon element/structure load/store.  */
 nUF(vld1,  vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1,  vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2,  vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2,  vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3,  vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3,  vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4,  vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4,  vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
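 /* Illustrative example (not part of the original source): a structure load
    of two D registers with post-increment, as parsed by the NSTRLST and
    ADDR operands above:

	vld1.32	{d0, d1}, [r0]!
  */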
#undef  THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v3
#define ARM_VARIANT &fpu_vfp_ext_v3

 cCE(fconsts,  eb00a00, 2, (RVS, I255),  vfp_sp_const),
 cCE(fconstd,  eb00b00, 2, (RVD, I255),  vfp_dp_const),
 cCE(fshtos,   eba0a40, 2, (RVS, I16z),  vfp_sp_conv_16),
 cCE(fshtod,   eba0b40, 2, (RVD, I16z),  vfp_dp_conv_16),
 cCE(fsltos,   eba0ac0, 2, (RVS, I32),   vfp_sp_conv_32),
 cCE(fsltod,   eba0bc0, 2, (RVD, I32),   vfp_dp_conv_32),
 cCE(fuhtos,   ebb0a40, 2, (RVS, I16z),  vfp_sp_conv_16),
 cCE(fuhtod,   ebb0b40, 2, (RVD, I16z),  vfp_dp_conv_16),
 cCE(fultos,   ebb0ac0, 2, (RVS, I32),   vfp_sp_conv_32),
 cCE(fultod,   ebb0bc0, 2, (RVD, I32),   vfp_dp_conv_32),
 cCE(ftoshs,   ebe0a40, 2, (RVS, I16z),  vfp_sp_conv_16),
 cCE(ftoshd,   ebe0b40, 2, (RVD, I16z),  vfp_dp_conv_16),
 cCE(ftosls,   ebe0ac0, 2, (RVS, I32),   vfp_sp_conv_32),
 cCE(ftosld,   ebe0bc0, 2, (RVD, I32),   vfp_dp_conv_32),
 cCE(ftouhs,   ebf0a40, 2, (RVS, I16z),  vfp_sp_conv_16),
 cCE(ftouhd,   ebf0b40, 2, (RVD, I16z),  vfp_dp_conv_16),
 cCE(ftouls,   ebf0ac0, 2, (RVS, I32),   vfp_sp_conv_32),
 cCE(ftould,   ebf0bc0, 2, (RVD, I32),   vfp_dp_conv_32),
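 /* Illustrative example (not part of the original source, syntax assumed):
    fconsts/fconstd above take an 8-bit encoded immediate (I255), so
    something like "fconsts s0, #112" loads one of the 256 directly
    encodable floating-point constants.  */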
#undef  THUMB_VARIANT

#define ARM_VARIANT &arm_cext_xscale  /* Intel XScale extensions.  */
 cCE(mia,    e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miaph,  e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabb,  e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabt,  e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatb,  e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatt,  e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(mar,    c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
 cCE(mra,    c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
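 /* Illustrative example (not part of the original source, syntax assumed):
    the XScale multiply-accumulate above targets the 40-bit accumulator,
    e.g. "mia acc0, r2, r3".  */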
#define ARM_VARIANT &arm_cext_iwmmxt  /* Intel Wireless MMX technology.  */
15957 cCE(tandcb
, e13f130
, 1, (RR
), iwmmxt_tandorc
),
15958 cCE(tandch
, e53f130
, 1, (RR
), iwmmxt_tandorc
),
15959 cCE(tandcw
, e93f130
, 1, (RR
), iwmmxt_tandorc
),
15960 cCE(tbcstb
, e400010
, 2, (RIWR
, RR
), rn_rd
),
15961 cCE(tbcsth
, e400050
, 2, (RIWR
, RR
), rn_rd
),
15962 cCE(tbcstw
, e400090
, 2, (RIWR
, RR
), rn_rd
),
15963 cCE(textrcb
, e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
15964 cCE(textrch
, e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
15965 cCE(textrcw
, e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
15966 cCE(textrmub
, e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15967 cCE(textrmuh
, e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15968 cCE(textrmuw
, e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15969 cCE(textrmsb
, e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15970 cCE(textrmsh
, e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15971 cCE(textrmsw
, e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
15972 cCE(tinsrb
, e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15973 cCE(tinsrh
, e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15974 cCE(tinsrw
, e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
15975 cCE(tmcr
, e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
15976 cCE(tmcrr
, c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
15977 cCE(tmia
, e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15978 cCE(tmiaph
, e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15979 cCE(tmiabb
, e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15980 cCE(tmiabt
, e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15981 cCE(tmiatb
, e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15982 cCE(tmiatt
, e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
15983 cCE(tmovmskb
, e100030
, 2, (RR
, RIWR
), rd_rn
),
15984 cCE(tmovmskh
, e500030
, 2, (RR
, RIWR
), rd_rn
),
15985 cCE(tmovmskw
, e900030
, 2, (RR
, RIWR
), rd_rn
),
15986 cCE(tmrc
, e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
15987 cCE(tmrrc
, c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
15988 cCE(torcb
, e13f150
, 1, (RR
), iwmmxt_tandorc
),
15989 cCE(torch
, e53f150
, 1, (RR
), iwmmxt_tandorc
),
15990 cCE(torcw
, e93f150
, 1, (RR
), iwmmxt_tandorc
),
15991 cCE(waccb
, e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15992 cCE(wacch
, e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15993 cCE(waccw
, e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
15994 cCE(waddbss
, e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15995 cCE(waddb
, e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15996 cCE(waddbus
, e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15997 cCE(waddhss
, e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15998 cCE(waddh
, e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
15999 cCE(waddhus
, e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16000 cCE(waddwss
, eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16001 cCE(waddw
, e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16002 cCE(waddwus
, e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16003 cCE(waligni
, e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
16004 cCE(walignr0
, e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16005 cCE(walignr1
, e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16006 cCE(walignr2
, ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16007 cCE(walignr3
, eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16008 cCE(wand
, e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16009 cCE(wandn
, e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16010 cCE(wavg2b
, e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16011 cCE(wavg2br
, e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16012 cCE(wavg2h
, ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16013 cCE(wavg2hr
, ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16014 cCE(wcmpeqb
, e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16015 cCE(wcmpeqh
, e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16016 cCE(wcmpeqw
, e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16017 cCE(wcmpgtub
, e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16018 cCE(wcmpgtuh
, e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16019 cCE(wcmpgtuw
, e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16020 cCE(wcmpgtsb
, e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16021 cCE(wcmpgtsh
, e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16022 cCE(wcmpgtsw
, eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16023 cCE(wldrb
, c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
16024 cCE(wldrh
, c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
16025 cCE(wldrw
, c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
16026 cCE(wldrd
, c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
16027 cCE(wmacs
, e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16028 cCE(wmacsz
, e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16029 cCE(wmacu
, e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16030 cCE(wmacuz
, e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16031 cCE(wmadds
, ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16032 cCE(wmaddu
, e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16033 cCE(wmaxsb
, e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16034 cCE(wmaxsh
, e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16035 cCE(wmaxsw
, ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16036 cCE(wmaxub
, e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16037 cCE(wmaxuh
, e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16038 cCE(wmaxuw
, e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16039 cCE(wminsb
, e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16040 cCE(wminsh
, e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16041 cCE(wminsw
, eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16042 cCE(wminub
, e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16043 cCE(wminuh
, e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16044 cCE(wminuw
, e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16045 cCE(wmov
, e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
16046 cCE(wmulsm
, e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16047 cCE(wmulsl
, e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16048 cCE(wmulum
, e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16049 cCE(wmulul
, e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16050 cCE(wor
, e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16051 cCE(wpackhss
, e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16052 cCE(wpackhus
, e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16053 cCE(wpackwss
, eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16054 cCE(wpackwus
, e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16055 cCE(wpackdss
, ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16056 cCE(wpackdus
, ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16057 cCE(wrorh
, e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16058 cCE(wrorhg
, e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16059 cCE(wrorw
, eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16060 cCE(wrorwg
, eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16061 cCE(wrord
, ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16062 cCE(wrordg
, ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16063 cCE(wsadb
, e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16064 cCE(wsadbz
, e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16065 cCE(wsadh
, e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16066 cCE(wsadhz
, e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16067 cCE(wshufh
, e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
16068 cCE(wsllh
, e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16069 cCE(wsllhg
, e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16070 cCE(wsllw
, e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16071 cCE(wsllwg
, e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16072 cCE(wslld
, ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16073 cCE(wslldg
, ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16074 cCE(wsrah
, e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16075 cCE(wsrahg
, e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16076 cCE(wsraw
, e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16077 cCE(wsrawg
, e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16078 cCE(wsrad
, ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16079 cCE(wsradg
, ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16080 cCE(wsrlh
, e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16081 cCE(wsrlhg
, e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16082 cCE(wsrlw
, ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16083 cCE(wsrlwg
, ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16084 cCE(wsrld
, ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
16085 cCE(wsrldg
, ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
16086 cCE(wstrb
, c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
16087 cCE(wstrh
, c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
16088 cCE(wstrw
, c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
16089 cCE(wstrd
, c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
16090 cCE(wsubbss
, e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16091 cCE(wsubb
, e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16092 cCE(wsubbus
, e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16093 cCE(wsubhss
, e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16094 cCE(wsubh
, e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16095 cCE(wsubhus
, e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16096 cCE(wsubwss
, eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16097 cCE(wsubw
, e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16098 cCE(wsubwus
, e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16099 cCE(wunpckehub
,e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16100 cCE(wunpckehuh
,e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16101 cCE(wunpckehuw
,e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16102 cCE(wunpckehsb
,e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16103 cCE(wunpckehsh
,e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16104 cCE(wunpckehsw
,ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
16105 cCE(wunpckihb
, e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16106 cCE(wunpckihh
, e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16107 cCE(wunpckihw
, e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16108 cCE(wunpckelub
,e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16109 cCE(wunpckeluh
,e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16110 cCE(wunpckeluw
,e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16111 cCE(wunpckelsb
,e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16112 cCE(wunpckelsh
,e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16113 cCE(wunpckelsw
,ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
16114 cCE(wunpckilb
, e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16115 cCE(wunpckilh
, e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16116 cCE(wunpckilw
, e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16117 cCE(wxor
, e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16118 cCE(wzero
, e300000
, 1, (RIWR
), iwmmxt_wzero
),
#define ARM_VARIANT &arm_cext_iwmmxt2  /* Intel Wireless MMX technology, version 2.  */
16122 cCE(torvscb
, e13f190
, 1, (RR
), iwmmxt_tandorc
),
16123 cCE(torvsch
, e53f190
, 1, (RR
), iwmmxt_tandorc
),
16124 cCE(torvscw
, e93f190
, 1, (RR
), iwmmxt_tandorc
),
16125 cCE(wabsb
, e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
16126 cCE(wabsh
, e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
16127 cCE(wabsw
, ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
16128 cCE(wabsdiffb
, e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16129 cCE(wabsdiffh
, e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16130 cCE(wabsdiffw
, e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16131 cCE(waddbhusl
, e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16132 cCE(waddbhusm
, e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16133 cCE(waddhc
, e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16134 cCE(waddwc
, ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16135 cCE(waddsubhx
, ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16136 cCE(wavg4
, e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16137 cCE(wavg4r
, e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16138 cCE(wmaddsn
, ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16139 cCE(wmaddsx
, eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16140 cCE(wmaddun
, ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16141 cCE(wmaddux
, e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16142 cCE(wmerge
, e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
16143 cCE(wmiabb
, e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16144 cCE(wmiabt
, e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16145 cCE(wmiatb
, e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16146 cCE(wmiatt
, e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16147 cCE(wmiabbn
, e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16148 cCE(wmiabtn
, e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16149 cCE(wmiatbn
, e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16150 cCE(wmiattn
, e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16151 cCE(wmiawbb
, e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16152 cCE(wmiawbt
, e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16153 cCE(wmiawtb
, ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16154 cCE(wmiawtt
, eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16155 cCE(wmiawbbn
, ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16156 cCE(wmiawbtn
, ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16157 cCE(wmiawtbn
, ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16158 cCE(wmiawttn
, ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16159 cCE(wmulsmr
, ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16160 cCE(wmulumr
, ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16161 cCE(wmulwumr
, ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16162 cCE(wmulwsmr
, ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16163 cCE(wmulwum
, ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16164 cCE(wmulwsm
, ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16165 cCE(wmulwl
, eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16166 cCE(wqmiabb
, e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16167 cCE(wqmiabt
, e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16168 cCE(wqmiatb
, ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16169 cCE(wqmiatt
, eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16170 cCE(wqmiabbn
, ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16171 cCE(wqmiabtn
, ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16172 cCE(wqmiatbn
, ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16173 cCE(wqmiattn
, ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16174 cCE(wqmulm
, e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16175 cCE(wqmulmr
, e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16176 cCE(wqmulwm
, ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16177 cCE(wqmulwmr
, ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
16178 cCE(wsubaddhx
, ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
#define ARM_VARIANT &arm_cext_maverick  /* Cirrus Maverick instructions.  */
16182 cCE(cfldrs
, c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
16183 cCE(cfldrd
, c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
16184 cCE(cfldr32
, c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
16185 cCE(cfldr64
, c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
16186 cCE(cfstrs
, c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
16187 cCE(cfstrd
, c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
16188 cCE(cfstr32
, c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
16189 cCE(cfstr64
, c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
16190 cCE(cfmvsr
, e000450
, 2, (RMF
, RR
), rn_rd
),
16191 cCE(cfmvrs
, e100450
, 2, (RR
, RMF
), rd_rn
),
16192 cCE(cfmvdlr
, e000410
, 2, (RMD
, RR
), rn_rd
),
16193 cCE(cfmvrdl
, e100410
, 2, (RR
, RMD
), rd_rn
),
16194 cCE(cfmvdhr
, e000430
, 2, (RMD
, RR
), rn_rd
),
16195 cCE(cfmvrdh
, e100430
, 2, (RR
, RMD
), rd_rn
),
16196 cCE(cfmv64lr
, e000510
, 2, (RMDX
, RR
), rn_rd
),
16197 cCE(cfmvr64l
, e100510
, 2, (RR
, RMDX
), rd_rn
),
16198 cCE(cfmv64hr
, e000530
, 2, (RMDX
, RR
), rn_rd
),
16199 cCE(cfmvr64h
, e100530
, 2, (RR
, RMDX
), rd_rn
),
16200 cCE(cfmval32
, e200440
, 2, (RMAX
, RMFX
), rd_rn
),
16201 cCE(cfmv32al
, e100440
, 2, (RMFX
, RMAX
), rd_rn
),
16202 cCE(cfmvam32
, e200460
, 2, (RMAX
, RMFX
), rd_rn
),
16203 cCE(cfmv32am
, e100460
, 2, (RMFX
, RMAX
), rd_rn
),
16204 cCE(cfmvah32
, e200480
, 2, (RMAX
, RMFX
), rd_rn
),
16205 cCE(cfmv32ah
, e100480
, 2, (RMFX
, RMAX
), rd_rn
),
16206 cCE(cfmva32
, e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
16207 cCE(cfmv32a
, e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
16208 cCE(cfmva64
, e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
16209 cCE(cfmv64a
, e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
16210 cCE(cfmvsc32
, e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
16211 cCE(cfmv32sc
, e1004e0
, 2, (RMDX
, RMDS
), rd
),
16212 cCE(cfcpys
, e000400
, 2, (RMF
, RMF
), rd_rn
),
16213 cCE(cfcpyd
, e000420
, 2, (RMD
, RMD
), rd_rn
),
16214 cCE(cfcvtsd
, e000460
, 2, (RMD
, RMF
), rd_rn
),
16215 cCE(cfcvtds
, e000440
, 2, (RMF
, RMD
), rd_rn
),
16216 cCE(cfcvt32s
, e000480
, 2, (RMF
, RMFX
), rd_rn
),
16217 cCE(cfcvt32d
, e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
16218 cCE(cfcvt64s
, e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
16219 cCE(cfcvt64d
, e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
16220 cCE(cfcvts32
, e100580
, 2, (RMFX
, RMF
), rd_rn
),
16221 cCE(cfcvtd32
, e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
16222 cCE(cftruncs32
,e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
16223 cCE(cftruncd32
,e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
16224 cCE(cfrshl32
, e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
16225 cCE(cfrshl64
, e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
16226 cCE(cfsh32
, e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
16227 cCE(cfsh64
, e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
16228 cCE(cfcmps
, e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
16229 cCE(cfcmpd
, e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
16230 cCE(cfcmp32
, e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
16231 cCE(cfcmp64
, e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
16232 cCE(cfabss
, e300400
, 2, (RMF
, RMF
), rd_rn
),
16233 cCE(cfabsd
, e300420
, 2, (RMD
, RMD
), rd_rn
),
16234 cCE(cfnegs
, e300440
, 2, (RMF
, RMF
), rd_rn
),
16235 cCE(cfnegd
, e300460
, 2, (RMD
, RMD
), rd_rn
),
16236 cCE(cfadds
, e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16237 cCE(cfaddd
, e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16238 cCE(cfsubs
, e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16239 cCE(cfsubd
, e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16240 cCE(cfmuls
, e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
16241 cCE(cfmuld
, e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
16242 cCE(cfabs32
, e300500
, 2, (RMFX
, RMFX
), rd_rn
),
16243 cCE(cfabs64
, e300520
, 2, (RMDX
, RMDX
), rd_rn
),
16244 cCE(cfneg32
, e300540
, 2, (RMFX
, RMFX
), rd_rn
),
16245 cCE(cfneg64
, e300560
, 2, (RMDX
, RMDX
), rd_rn
),
16246 cCE(cfadd32
, e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16247 cCE(cfadd64
, e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16248 cCE(cfsub32
, e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16249 cCE(cfsub64
, e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16250 cCE(cfmul32
, e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16251 cCE(cfmul64
, e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
16252 cCE(cfmac32
, e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16253 cCE(cfmsc32
, e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
16254 cCE(cfmadd32
, e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
16255 cCE(cfmsub32
, e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
16256 cCE(cfmadda32
, e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
16257 cCE(cfmsuba32
, e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
#undef  THUMB_VARIANT

/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and stores them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
   2 (short) and 4 (long)  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
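/* Usage sketch (illustrative, not part of the original source): emitting a
   4-byte ARM instruction word in target byte order, here the canonical
   "mov r0, r0" NOP encoding:

     md_number_to_chars (frag_more (4), 0xe1a00000, 4);  */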
static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    {
      while (n--)
	{
	  result <<= 8;
	  result |= (*where++ & 255);
	}
    }
  else
    {
      while (n--)
	{
	  result <<= 8;
	  result |= (where[n] & 255);
	}
    }

  return result;
}
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
/* Convert a machine dependent frag.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16349 unsigned long insn
;
16350 unsigned long old_op
;
16358 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
16360 old_op
= bfd_get_16(abfd
, buf
);
16361 if (fragp
->fr_symbol
) {
16362 exp
.X_op
= O_symbol
;
16363 exp
.X_add_symbol
= fragp
->fr_symbol
;
16365 exp
.X_op
= O_constant
;
16367 exp
.X_add_number
= fragp
->fr_offset
;
16368 opcode
= fragp
->fr_subtype
;
16371 case T_MNEM_ldr_pc
:
16372 case T_MNEM_ldr_pc2
:
16373 case T_MNEM_ldr_sp
:
16374 case T_MNEM_str_sp
:
16381 if (fragp
->fr_var
== 4)
16383 insn
= THUMB_OP32(opcode
);
16384 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
16386 insn
|= (old_op
& 0x700) << 4;
16390 insn
|= (old_op
& 7) << 12;
16391 insn
|= (old_op
& 0x38) << 13;
16393 insn
|= 0x00000c00;
16394 put_thumb32_insn (buf
, insn
);
16395 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
16399 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
16401 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
16404 if (fragp
->fr_var
== 4)
16406 insn
= THUMB_OP32 (opcode
);
16407 insn
|= (old_op
& 0xf0) << 4;
16408 put_thumb32_insn (buf
, insn
);
16409 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
16413 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16414 exp
.X_add_number
-= 4;
16422 if (fragp
->fr_var
== 4)
16424 int r0off
= (opcode
== T_MNEM_mov
16425 || opcode
== T_MNEM_movs
) ? 0 : 8;
16426 insn
= THUMB_OP32 (opcode
);
16427 insn
= (insn
& 0xe1ffffff) | 0x10000000;
16428 insn
|= (old_op
& 0x700) << r0off
;
16429 put_thumb32_insn (buf
, insn
);
16430 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
16434 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
16439 if (fragp
->fr_var
== 4)
16441 insn
= THUMB_OP32(opcode
);
16442 put_thumb32_insn (buf
, insn
);
16443 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
16446 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
16450 if (fragp
->fr_var
== 4)
16452 insn
= THUMB_OP32(opcode
);
16453 insn
|= (old_op
& 0xf00) << 14;
16454 put_thumb32_insn (buf
, insn
);
16455 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
16458 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
16461 case T_MNEM_add_sp
:
16462 case T_MNEM_add_pc
:
16463 case T_MNEM_inc_sp
:
16464 case T_MNEM_dec_sp
:
16465 if (fragp
->fr_var
== 4)
16467 /* ??? Choose between add and addw. */
16468 insn
= THUMB_OP32 (opcode
);
16469 insn
|= (old_op
& 0xf0) << 4;
16470 put_thumb32_insn (buf
, insn
);
16471 if (opcode
== T_MNEM_add_pc
)
16472 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
16474 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
16477 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16485 if (fragp
->fr_var
== 4)
16487 insn
= THUMB_OP32 (opcode
);
16488 insn
|= (old_op
& 0xf0) << 4;
16489 insn
|= (old_op
& 0xf) << 16;
16490 put_thumb32_insn (buf
, insn
);
16491 if (insn
& (1 << 20))
16492 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
16494 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
16497 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
16503 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
16505 fixp
->fx_file
= fragp
->fr_file
;
16506 fixp
->fx_line
= fragp
->fr_line
;
16507 fragp
->fr_fix
+= fragp
->fr_var
;
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */

static int
relax_immediate (fragS *fragp, int size, int shift)
{
  offsetT offset;
  offsetT mask;
  offsetT low;

  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)
    return 4;

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & low)
    return 4;
  if (offset & ~mask)
    return 4;
  return 2;
}
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */

static int
relax_adr (fragS *fragp, asection *sec)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = S_GET_VALUE (fragp->fr_symbol) + fragp->fr_offset;
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Force the insn to the 4-byte version if the target address is not
     sufficiently aligned.  This prevents an infinite loop when two
     instructions have contradictory range/alignment requirements.  */
  if (val & 3)
    return 4;
  val -= addr;
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
/* Return the size of a relaxable add/sub immediate instruction.  */

static int
relax_addsub (fragS *fragp, asection *sec)
{
  char *buf;
  int op;

  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16 (sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  else
    return relax_immediate (fragp, 3, 0);
}
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = S_GET_VALUE (fragp->fr_symbol) + fragp->fr_offset;
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value * 2.  */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;

  return 2;
}
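/* Worked example (illustrative, based on relax_branch above): a Thumb
   conditional branch passes bits == 8, so limit is 1 << 8 and the reachable
   signed displacement is -256..+255 units of 2 bytes, i.e. roughly +-512
   bytes from the pipeline-adjusted address.  A target outside that window
   makes the frag grow to the 4-byte encoding; anything inside it keeps the
   2-byte form.  */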
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    default:
      newsize = relax_addsub (fragp, sec);
      break;
    }

  if (newsize < 0)
    {
      fragp->fr_var = -newsize;
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
      return -(newsize + oldsize);
    }
  fragp->fr_var = newsize;
  return newsize - oldsize;
}
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
                  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

void
arm_handle_align (fragS * fragP)
{
  static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
  static char const thumb_noop[2] = { 0xc0, 0x46 };
  static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
  static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };

  int bytes, fix, noop_size;
  char * p;
  const char * noop;

  if (fragP->fr_type != rs_align_code)
    return;

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  if (fragP->tc_frag_data)
    {
      if (target_big_endian)
        noop = thumb_bigend_noop;
      else
        noop = thumb_noop;
      noop_size = sizeof (thumb_noop);
    }
  else
    {
      if (target_big_endian)
        noop = arm_bigend_noop;
      else
        noop = arm_noop;
      noop_size = sizeof (arm_noop);
    }

  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
  fragP->fr_var = noop_size;
}
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */

void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));

  p = frag_var (rs_align_code,
                MAX_MEM_FOR_RS_ALIGN_CODE,
                1,
                (relax_substateT) max,
                (symbolS *) NULL,
                (offsetT) n,
                (char *) NULL);
  *p = 0;
}
/* Perform target specific initialisation of a frag.  */

void
arm_init_frag (fragS * fragP)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data = thumb_mode;
}
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}

int
arm_elf_section_type (const char * str, size_t len)
{
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;

  return -1;
}
/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.  */

static void
flush_pending_unwind (void)
{
  offsetT offset;

  offset = unwind.pending_offset;
  unwind.pending_offset = 0;

  if (offset != 0)
    add_unwind_adjustsp (offset);
}
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
        unwind.opcodes = xrealloc (unwind.opcodes,
                                   unwind.opcode_alloc);
      else
        unwind.opcodes = xmalloc (unwind.opcode_alloc);
    }
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
/* Add unwind opcodes to adjust the stack pointer.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
         remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
        add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
        {
          bytes[n] = o & 0x7f;
          o >>= 7;
          if (o)
            bytes[n] |= 0x80;
          n++;
        }

      /* Add the insn.  */
      for (; n; n--)
        add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      while (offset > 0x100)
        {
          add_unwind_opcode (0x7f, 1);
          offset -= 0x100;
        }
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
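/* Worked example (illustrative): an adjustment of 0x300 bytes takes the
   long form above.  (0x300 - 0x204) >> 2 is 0x3f, whose uleb128 encoding
   is the single byte 0x3f, so once the reverse-ordered list is flushed the
   unwinder sees the opcode stream 0xb2 0x3f.  A standalone sketch of the
   uleb128 step follows, kept out of the build with #if 0; the helper name
   is made up.  */
#if 0
static int
example_uleb128 (unsigned long o, unsigned char *bytes)
{
  int n = 0;

  do
    {
      bytes[n] = o & 0x7f;      /* Low seven bits per byte.  */
      o >>= 7;
      if (o)
        bytes[n] |= 0x80;       /* Continuation bit.  */
      n++;
    }
  while (o);
  return n;                     /* 0x3f encodes as one byte, 0x3f.  */
}
#endif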
/* Finish the list of unwind opcodes for this function.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
               strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
        {
          as_bad (_("Group section `%s' has no group signature"),
                  segment_name (text_seg));
          ignore_rest_of_line ();
          return;
        }
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
        {
          if (have_data)
            as_bad (_("handlerdata in cantunwind frame"));
          return 1; /* EXIDX_CANTUNWIND.  */
        }

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
        {
          if (unwind.opcode_count > 3)
            unwind.personality_index = 1;
          else
            unwind.personality_index = 0;
        }

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
        {
          if (unwind.opcode_count > 3)
            as_bad (_("too many unwind opcodes for personality routine 0"));

          if (!have_data)
            {
              /* All the data is inline in the index table.  */
              data = 0x80;
              n = 3;
              while (unwind.opcode_count > 0)
                {
                  unwind.opcode_count--;
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
                  n--;
                }

              /* Pad with "finish" opcodes.  */
              while (n--)
                data = (data << 8) | 0xb0;

              return data;
            }
          size = 0;
        }
      else
        /* We get two opcodes "free" in the first word.  */
        size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.  */
    size = unwind.opcode_count + 1;

  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
               BFD_RELOC_ARM_PREL31);
      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size - 1;
      n = 3;
      break;

    /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
        {
          md_number_to_chars (ptr, data, 4);
          ptr += 4;
          n = 4;
          data = 0;
        }
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
        data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */

/* Convert REGNAME to a DWARF-2 register number.  */

int
tc_arm_regname_to_dw2regnum (char *regname)
{
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);

  if (reg == FAIL)
    return -1;

  return reg;
}
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS expr;

  expr.X_op = O_secrel;
  expr.X_add_symbol = symbol;
  expr.X_add_number = 0;
  emit_expr (&expr, size);
}
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
          || (arm_force_relocation (fixP)
#ifdef TE_WINCE
              && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
              )))
    base = 0;

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
         bottom two bits of the PC are forced to zero for the
         calculation.  This happens *after* application of the
         pipeline offset.  However, Thumb adrl already adjusts for
         this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
      return base + 4;

      /* ARM mode branches are offset by +8.  However, the Windows CE
         loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
         discovered the value of a symbol, or the address of the frag involved
         we must account for the offset by +8, as the OS loader will never see the reloc.
         See fixup_segment() in write.c.
         The S_IS_EXTERNAL test handles the case of global symbols.
         Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
          && fixP->fx_addsy != NULL
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
        return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
         branches, the Windows CE loader *does* expect the relocation
         to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;

      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
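/* Worked example (illustrative, based on the ARM branch bias above): for a
   locally resolved ARM-state branch assembled at address 0x1000 and
   targeting 0x1020, this function returns 0x1000 + 8 = 0x1008, so the fixup
   value handed to md_apply_fix is 0x18 and the 24-bit branch offset field
   receives 0x18 >> 2 = 6.  */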
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in the symbol table"));

          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, & zero_address_frag);
        }

      return GOT_symbol;
    }
#endif

  return 0;
}
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  */

static unsigned int
validate_immediate_twopart (unsigned int   val,
                            unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
        if (a & 0xff00)
          {
            if (a & ~0xffff)
              continue;
            * highpart = (a >> 8) | ((i + 24) << 7);
          }
        else if (a & 0xff0000)
          {
            if (a & 0xff000000)
              continue;
            * highpart = (a >> 16) | ((i + 16) << 7);
          }
        else
          {
            assert (a & 0xff000000);
            * highpart = (a >> 24) | ((i + 8) << 7);
          }

        return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
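/* Worked example (illustrative): 0xffff cannot be encoded as a single
   8-bit-rotated ARM immediate, but the routine above splits it into a low
   part of 0xff (no rotation) and a high part of 0xff00 (0xff rotated right
   by 24), which an ADRL expansion can then add in two instructions.  */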
static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
        MOV <-> MVN
        AND <-> BIC
        ADC <-> SBC
   by inverting the second operand, and
        ADD <-> SUB
        CMP <-> CMN
   by negating the second operand.  */

static int
negate_data_op (unsigned long * instruction,
                unsigned long   value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:             /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:             /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:             /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:             /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:             /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
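/* Worked example (illustrative): "cmp r0, #-1" has no valid 8-bit-rotated
   encoding for -1, but negate_data_op rewrites the opcode field to CMN and
   substitutes the negated constant, so the instruction is emitted as
   "cmn r0, #1".  */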
/* Like negate_data_op, but for Thumb-2.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      if (rd == 15)
        value = FAIL;
      else
        value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC.  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int) FAIL)
    return FAIL;

  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
/* Read a 32-bit thumb instruction from buf.  */

static unsigned long
get_thumb32_insn (char * buf)
{
  unsigned long insn;
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);

  return insn;
}
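/* Illustrative sketch only (kept out of the build with #if 0): the inverse
   operation, showing that a 32-bit Thumb instruction is stored as two
   little-endian halfwords with the most significant halfword first.  The
   real helper, put_thumb32_insn, is defined elsewhere in this file; the
   name below is made up for the example.  */
#if 0
static void
example_put_thumb32 (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn & 0xffff, THUMB_SIZE);
}
#endif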
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a thumb function.  */

int
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return 1;
    }

  /* Process as normal.  */
  return 0;
}
void
md_apply_fix (fixS *    fixP,
              valueT *  valP,
              segT      seg)
{
  offsetT        value = * valP;
  offsetT        newval;
  unsigned int   newimm;
  unsigned long  temp;
  int            sign;
  char *         buf = fixP->fx_where + fixP->fx_frag->fr_literal;

  assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* On a 64-bit host, silently truncate 'value' to 32 bits for
     consistency with the behaviour on 32-bit hosts.  Remember value
     for emit_reloc.  */
  value &= 0xffffffff;
  value ^= 0x80000000;
  value -= 0x80000000;

  *valP = value;
  fixP->fx_addnumber = value;

  /* Same treatment for fixP->fx_offset.  */
  fixP->fx_offset &= 0xffffffff;
  fixP->fx_offset ^= 0x80000000;
  fixP->fx_offset -= 0x80000000;

  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;
    case BFD_RELOC_ARM_IMMEDIATE:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
          && ! S_IS_DEFINED (fixP->fx_addsy))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
          break;
        }

      newimm = encode_arm_immediate (value);
      temp = md_chars_to_number (buf, INSN_SIZE);

      /* If the instruction will fail, see if we can fix things up by
         changing the opcode.  */
      if (newimm == (unsigned int) FAIL
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
          break;
        }

      newimm |= (temp & 0xfffff000);
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      {
        unsigned int highpart = 0;
        unsigned int newinsn  = 0xe1a00000; /* nop.  */

        newimm = encode_arm_immediate (value);
        temp = md_chars_to_number (buf, INSN_SIZE);

        /* If the instruction will fail, see if we can fix things up by
           changing the opcode.  */
        if (newimm == (unsigned int) FAIL
            && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
          {
            /* No ?  OK - try using two ADD instructions to generate
               the value.  */
            newimm = validate_immediate_twopart (value, & highpart);

            /* Yes - then make sure that the second instruction is
               also an add.  */
            if (newimm != (unsigned int) FAIL)
              newinsn = temp;
            /* Still No ?  Try using a negated value.  */
            else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
              temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
            /* Otherwise - give up.  */
            else
              {
                as_bad_where (fixP->fx_file, fixP->fx_line,
                              _("unable to compute ADRL instructions for PC offset of 0x%lx"),
                              (long) value);
                break;
              }

            /* Replace the first operand in the 2nd instruction (which
               is the PC) with the destination register.  We have
               already added in the PC in the first instruction and we
               do not want to do it again.  */
            newinsn &= ~ 0xf0000;
            newinsn |= ((newinsn & 0x0f000) << 4);
          }

        newimm |= (temp & 0xfffff000);
        md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);

        highpart |= (newinsn & 0xfffff000);
        md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
      }
      break;
    case BFD_RELOC_ARM_OFFSET_IMM:
      if (!fixP->fx_done && seg->use_rela_p)
        value = 0;

    case BFD_RELOC_ARM_LITERAL:
      sign = value >= 0;
      if (value < 0)
        value = -value;

      if (validate_offset_imm (value, 0) == FAIL)
        {
          if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid literal constant: pool needs to be closer"));
          else
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad immediate value for offset (%ld)"),
                          (long) value);
          break;
        }

      newval = md_chars_to_number (buf, INSN_SIZE);
      newval &= 0xff7ff000;
      newval |= value | (sign ? INDEX_UP : 0);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
      sign = value >= 0;
      if (value < 0)
        value = -value;

      if (validate_offset_imm (value, 1) == FAIL)
        {
          if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid literal constant: pool needs to be closer"));
          else
            as_bad (_("bad immediate value for half-word offset (%ld)"),
                    (long) value);
          break;
        }

      newval = md_chars_to_number (buf, INSN_SIZE);
      newval &= 0xff7ff0f0;
      newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_T32_OFFSET_U8:
      if (value < 0 || value > 1020 || value % 4 != 0)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("bad immediate value for offset (%ld)"), (long) value);
      value /= 4;

      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      newval |= value;
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
         load/store instruction with immediate offset:

         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
                                                   *4, optional writeback(W)
                                                   (doubleword load/store)
         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

         Uppercase letters indicate bits that are already encoded at
         this point.  Lowercase letters are our problem.  For the
         second block of instructions, the secondary opcode nybble
         (bits 8..11) is present, and bit 23 is zero, even if this is
         a PC-relative operation.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
        {
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
          if (value >= 0)
            newval |= (1 << 23);
          else
            value = -value;
          if (value % 4 != 0)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset not a multiple of 4"));
              break;
            }
          value /= 4;
          if (value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else if ((newval & 0x000f0000) == 0x000f0000)
        {
          /* PC-relative, 12-bit offset.  */
          if (value >= 0)
            newval |= (1 << 23);
          else
            value = -value;
          if (value > 0xfff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xfff;
        }
      else if ((newval & 0x00000100) == 0x00000100)
        {
          /* Writeback: 8-bit, +/- offset.  */
          if (value >= 0)
            newval |= (1 << 9);
          else
            value = -value;
          if (value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else if ((newval & 0x00000f00) == 0x00000e00)
        {
          /* T-instruction: positive 8-bit offset.  */
          if (value < 0 || value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else
        {
          /* Positive 12-bit or negative 8-bit offset.  */
          int limit;

          if (value >= 0)
            {
              newval |= (1 << 23);
              limit = 0xfff;
            }
          else
            {
              value = -value;
              limit = 0xff;
            }
          if (value > limit)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~limit;
        }

      newval |= value;
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_SHIFT_IMM:
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (((unsigned long) value) > 32
          || (value == 32
              && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("shift expression is too large"));
          break;
        }

      if (value == 0)
        /* Shifts of zero must be done as lsl.  */
        newval &= ~0x60;
      else if (value == 32)
        value = 0;
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
          && ! S_IS_DEFINED (fixP->fx_addsy))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
          break;
        }

      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
          || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
        {
          newimm = encode_thumb32_immediate (value);
          if (newimm == (unsigned int) FAIL)
            newimm = thumb32_negate_data_op (&newval, value);
        }
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
          && newimm == (unsigned int) FAIL)
        {
          /* Turn add/sub into addw/subw.  */
          if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
            newval = (newval & 0xfeffffff) | 0x02000000;

          /* 12 bit immediate for addw/subw.  */
          if (value < 0)
            {
              value = -value;
              newval ^= 0x00a00000;
            }
          if (value > 0xfff)
            newimm = (unsigned int) FAIL;
          else
            newimm = value;
        }

      if (newimm == (unsigned int) FAIL)
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
          break;
        }

      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_SMC:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_SWI:
      if (fixP->tc_fix_data != 0)
        {
          if (((unsigned long) value) > 0xff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= value;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      else
        {
          if (((unsigned long) value) > 0x00ffffff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= value;
          md_number_to_chars (buf, newval, INSN_SIZE);
        }
      break;

    case BFD_RELOC_ARM_MULTI:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_PCREL_CALL:
      newval = md_chars_to_number (buf, INSN_SIZE);
      if ((newval & 0xf0000000) == 0xf0000000)
        temp = 1;
      else
        temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:
      temp = 1;
    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
         instruction, in a 24 bit, signed field.  Bits 26 through 32 either
         all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
         also be clear.  */
      if (value & temp)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("misaligned branch destination"));
      if ((value & (offsetT) 0xfe000000) != (offsetT) 0
          && (value & (offsetT) 0xfe000000) != (offsetT) 0xfe000000)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= (value >> 2) & 0x00ffffff;
          /* Set the H bit on BLX instructions.  */
          if (temp == 1)
            {
              if (value & 2)
                newval |= 0x01000000;
              else
                newval &= ~0x01000000;
            }
          md_number_to_chars (buf, newval, INSN_SIZE);
        }
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */
      if (value & ~0x7e)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0x1ff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0xfff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          offsetT newval2;
          addressT S, J1, J2, lo, hi;

          S  = (value & 0x00100000) >> 20;
          J2 = (value & 0x00080000) >> 19;
          J1 = (value & 0x00040000) >> 18;
          hi = (value & 0x0003f000) >> 12;
          lo = (value & 0x00000ffe) >> 1;

          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval  |= (S << 10) | hi;
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
        /* For a BLX instruction, make sure that the relocation is rounded up
           to a word boundary.  This follows the semantics of the instruction
           which specifies that bit 1 of the target address will come from bit
           1 of the base address.  */
        value = (value + 1) & ~ 1;

      if (fixP->fx_done || !seg->use_rela_p)
        {
          offsetT newval2;

          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval  |= (value & 0x7fffff) >> 12;
          newval2 |= (value & 0xfff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          offsetT newval2;
          addressT S, I1, I2, lo, hi;

          S  = (value & 0x01000000) >> 24;
          I1 = (value & 0x00800000) >> 23;
          I2 = (value & 0x00400000) >> 22;
          hi = (value & 0x003ff000) >> 12;
          lo = (value & 0x00000ffe) >> 1;

          I1 = !(I1 ^ S);
          I2 = !(I2 ^ S);

          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval  |= (S << 10) | hi;
          newval2 |= (I1 << 13) | (I2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
        }
      break;

    case BFD_RELOC_8:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 1);
      break;
    case BFD_RELOC_16:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 2);
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* Fall through.  */

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_TARGET2:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, 0, 4);
      break;

    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
    case BFD_RELOC_32_SECREL:
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
        /* For WinCE we only do this for pcrel fixups.  */
        if (fixP->fx_done || fixP->fx_pcrel)
#endif
          md_number_to_chars (buf, value, 4);
      break;
    case BFD_RELOC_ARM_PREL31:
      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, 4) & 0x80000000;
          if ((value ^ (value >> 1)) & 0x40000000)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("rel31 relocation overflow"));
          newval |= value & 0x7fffffff;
          md_number_to_chars (buf, newval, 4);
        }
      break;
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      if (value < -1023 || value > 1023 || (value & 3))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
    cp_off_common:
      sign = value >= 0;
      if (value < 0)
        value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        newval = md_chars_to_number (buf, INSN_SIZE);
      else
        newval = get_thumb32_insn (buf);
      newval &= 0xff7fff00;
      newval |= (value >> 2) | (sign ? INDEX_UP : 0);
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        md_number_to_chars (buf, newval, INSN_SIZE);
      else
        put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      if (value < -255 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
    case BFD_RELOC_ARM_THUMB_OFFSET:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted depends
         on the type of instruction, we can establish this from the
         top 4 bits.  */
      switch (newval >> 12)
        {
        case 4: /* PC load.  */
          /* Thumb PC loads are somewhat odd, bit 1 of the PC is
             forced to zero for these loads; md_pcrel_from has already
             compensated for this.  */
          if (value & 3)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, target not word aligned (0x%08lX)"),
                          (((unsigned long) fixP->fx_frag->fr_address
                            + (unsigned long) fixP->fx_where) & ~3)
                          + (unsigned long) value);

          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);

          newval |= value >> 2;
          break;

        case 9: /* SP load/store.  */
          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value >> 2;
          break;

        case 6: /* Word load/store.  */
          if (value & ~0x7c)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 4; /* 6 - 2.  */
          break;

        case 7: /* Byte load/store.  */
          if (value & ~0x1f)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 6;
          break;

        case 8: /* Halfword load/store.  */
          if (value & ~0x3e)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 5; /* 6 - 1.  */
          break;

        default:
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        "Unable to process relocation for thumb opcode: %lx",
                        (unsigned long) newval);
          break;
        }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
         the following immediate relocations:

            3bit ADD/SUB
            8bit ADD/SUB
            9bit ADD/SUB SP word-aligned
           10bit ADD PC/SP word-aligned

         The type of instruction being processed is encoded in the
         instruction field.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      {
        int rd = (newval >> 4) & 0xf;
        int rs = newval & 0xf;
        int subtract = !!(newval & 0x8000);

        /* Check for HI regs, only very restricted cases allowed:
           Adjusting SP, and using PC or SP to get an address.  */
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid Hi register with immediate"));

        /* If value is negative, choose the opposite instruction.  */
        if (value < 0)
          {
            value = -value;
            subtract = !subtract;
            if (value < 0)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
          }

        if (rd == REG_SP)
          {
            if (value & ~0x1fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for stack address calculation"));
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
            newval |= value >> 2;
          }
        else if (rs == REG_PC || rs == REG_SP)
          {
            if (subtract || value & ~0x3fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
                            (unsigned long) value);
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
            newval |= rd << 8;
            newval |= value >> 2;
          }
        else if (rs == rd)
          {
            if (value & ~0xff)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
            newval |= (rd << 8) | value;
          }
        else
          {
            if (value & ~0x7)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
            newval |= rd | (rs << 3) | (value << 6);
          }
      }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_THUMB_IMM:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid immediate: %ld is too large"),
                      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
        newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
        value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      fixP->fx_done = 0;
      return;

    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
        {
          /* REL format relocations are limited to a 16-bit addend.  */
          if (!fixP->fx_done)
            {
              if (value < -0x1000 || value > 0xffff)
                as_bad_where (fixP->fx_file, fixP->fx_line,
                              _("offset too big"));
            }
          else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
                   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
            {
              value >>= 16;
            }

          if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
              || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
            {
              newval = get_thumb32_insn (buf);
              newval &= 0xfbf08f00;
              newval |= (value & 0xf000) << 4;
              newval |= (value & 0x0800) << 15;
              newval |= (value & 0x0700) << 4;
              newval |= (value & 0x00ff);
              put_thumb32_insn (buf, newval);
            }
          else
            {
              newval = md_chars_to_number (buf, 4);
              newval &= 0xfff0f000;
              newval |= value & 0x0fff;
              newval |= (value & 0xf000) << 4;
              md_number_to_chars (buf, newval, 4);
            }
        }
      break;
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma encoded_addend;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend can be
             expressed as an 8-bit constant plus a rotation.  */
          encoded_addend = encode_arm_immediate (addend_abs);
          if (encoded_addend == (unsigned int) FAIL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("the offset 0x%08lX is not representable"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is positive, use an ADD instruction.
             Otherwise use a SUB.  Take care not to destroy the S bit.  */
          insn &= 0xff1fffff;
          if (value < 0)
            insn |= 1 << 22;
          else
            insn |= 1 << 23;

          /* Place the encoded addend into the first 12 bits of the
             instruction.  */
          insn &= 0xfffff000;
          insn |= encoded_addend;

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend can be
             encoded in 12 bits.  */
          if (addend_abs >= 0x1000)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is negative, clear bit 23 of the instruction.
             Otherwise set it.  */
          if (value < 0)
            insn &= ~(1 << 23);
          else
            insn |= 1 << 23;

          /* Place the absolute value of the addend into the first 12 bits
             of the instruction.  */
          insn &= 0xfffff000;
          insn |= addend_abs;

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend can be
             encoded in 8 bits.  */
          if (addend_abs >= 0x100)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is negative, clear bit 23 of the instruction.
             Otherwise set it.  */
          if (value < 0)
            insn &= ~(1 << 23);
          else
            insn |= 1 << 23;

          /* Place the first four bits of the absolute value of the addend
             into the first 4 bits of the instruction, and the remaining
             four into bits 8 .. 11.  */
          insn &= 0xfffff0f0;
          insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend is a multiple of
             four and, when divided by four, fits in 8 bits.  */
          if (addend_abs & 0x3)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (must be word-aligned)"),
                          (unsigned long) addend_abs);

          if ((addend_abs >> 2) > 0xff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (must be an 8-bit number of words)"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is negative, clear bit 23 of the instruction.
             Otherwise set it.  */
          if (value < 0)
            insn &= ~(1 << 23);
          else
            insn |= 1 << 23;

          /* Place the addend (divided by four) into the first eight
             bits of the instruction.  */
          insn &= 0xfffffff0;
          insn |= addend_abs >> 2;

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_UNUSED:
    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
                    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
    }
}
/* Translate internal representation of relocation info to BFD target
   format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
        fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
        fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_8_PCREL;
          break;
        }

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_16_PCREL;
          break;
        }

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_32_PCREL;
          break;
        }

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_MOVW_PCREL;
          break;
        }

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_MOVT_PCREL;
          break;
        }

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
          break;
        }

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
          break;
        }

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BLX:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_32_SECREL:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
         been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("literal referenced across section boundary"));
      return NULL;
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
         But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
        reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
        {
          code = fixp->fx_r_type;
          break;
        }

      if (fixp->fx_addsy != NULL
          && !S_IS_DEFINED (fixp->fx_addsy)
          && S_IS_LOCAL (fixp->fx_addsy))
        {
          as_bad_where (fixp->fx_file, fixp->fx_line,
                        _("undefined local label `%s'"),
                        S_GET_NAME (fixp->fx_addsy));
          return NULL;
        }

      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;
18869 switch (fixp
->fx_r_type
)
18871 case BFD_RELOC_NONE
: type
= "NONE"; break;
18872 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
18873 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
18874 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
18875 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
18876 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
18877 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
18878 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
18879 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
18880 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
18881 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
18882 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
18883 default: type
= _("<unknown>"); break;
18885 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18886 _("cannot represent %s relocation in this object file format"),
18893 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
18895 && fixp
->fx_addsy
== GOT_symbol
)
18897 code
= BFD_RELOC_ARM_GOTPC
;
18898 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
18902 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
18904 if (reloc
->howto
== NULL
)
18906 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
18907 _("cannot represent %s relocation in this object file format"),
18908 bfd_get_reloc_code_name (code
));
18912 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
18913 vtable entry to be used in the relocation's section offset. */
18914 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
18915 reloc
->address
= fixp
->fx_offset
;
/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */

void
cons_fix_new_arm (fragS *	frag,
		  int		where,
		  int		size,
		  expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
    default:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    }

  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
#if defined OBJ_COFF || defined OBJ_ELF
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

int
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
	  ? "elf32-bigarm-symbian"
	  : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
	  ? "elf32-bigarm-vxworks"
	  : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
/* MD interface: Finalization.  */

/* A good place to do this, although this was probably not intended
   for this kind of use.  We need to dump the literal pool before
   references are made to a null symbol pointer.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
      arm_elf_change_section ();
      s_ltorg (0);
    }
}
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_TFUNC);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }
#endif
}
/* MD interface: Initialization.  */

static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
		 (PTR) (barrier_opt_names + i));
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));

  set_constant_flonums ();
  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;
      }
#endif

    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif
  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
/* Command line processing.  */

/* Invocation line includes a switch not recognized by the base assembler.
   See if it's a processor-specific option.

   This routine is somewhat complicated by the need for backwards
   compatibility (since older releases of gcc can't be changed).
   The new options try to make the interface as compatible as
   possible with GCC.

   New options (supported) are:

	   -mcpu=<cpu name>	      Assemble for selected processor
	   -march=<architecture name> Assemble for selected architecture
	   -mfpu=<fpu architecture>   Assemble for selected FPU.
	   -EB/-mbig-endian	      Big-endian
	   -EL/-mlittle-endian	      Little-endian
	   -k			      Generate PIC code
	   -mthumb		      Start in Thumb mode
	   -mthumb-interwork	      Code supports ARM/Thumb interworking

   For now we will also provide support for:

	   -mapcs-32		      32-bit Program counter
	   -mapcs-26		      26-bit Program counter
	   -mapcs-float		      Floats passed in FP registers
	   -mapcs-reentrant	      Reentrant code

   (sometime these will probably be replaced with -mapcs=<list of options>
   and -matpcs=<list of options>)

   The remaining options are only supported for backwards compatibility.
   Cpu variants, the arm part is optional:
	   -m[arm]1		      Currently not supported.
	   -m[arm]2, -m[arm]250	      Arm 2 and Arm 250 processor
	   -m[arm]3		      Arm 3 processor
	   -m[arm]6[xx],	      Arm 6 processors
	   -m[arm]7[xx][t][[d]m]      Arm 7 processors
	   -m[arm]8[10]		      Arm 8 processors
	   -m[arm]9[20][tdmi]	      Arm 9 processors
	   -mstrongarm[110[0]]	      StrongARM processors
	   -mxscale		      XScale processors
	   -m[arm]v[2345[t[e]]]	      Arm architectures
	   -mall		      All (except the ARM1)

	   -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	   -mfpe-old		      (No float load/store multiples)
	   -mvfpxd		      VFP Single precision

	   -mno-fpu		      Disable all floating point instructions

   The following CPU names are recognized:
	   arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	   arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	   arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	   arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	   arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	   arm10t arm10e, arm1020t, arm1020e, arm10200e,
	   strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
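/* Illustrative examples only (not taken from the original sources): command
   lines that exercise the options documented above.  The CPU, architecture
   and FPU names are just sample entries from the tables that follow.

     as -mcpu=arm7tdmi -mthumb-interwork -o foo.o foo.s
     as -march=armv5te -mfpu=vfp -mfloat-abi=softfp -o bar.o bar.s
     as -mfpa11 -o legacy.o legacy.s      (deprecated; prefer -mfpu=fpa11)  */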
const char * md_shortopts = "m:k";

#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);

struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.	*/
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
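/* Example (illustrative): from the table above, "-mapcs-26" sets
   uses_apcs_26 to 1 while "-mapcs-32" resets it to 0, and "-mapcs-frame"
   is accepted but changes nothing because its variable pointer is NULL.  */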
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.	*/
  const arm_feature_set value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.	*/
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,	  NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,	  NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,	  NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  {"ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",		ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",		ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",		ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",		ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,		ARM_ARCH_NONE}
};

/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3},
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {NULL,		ARM_ARCH_NONE}
};
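/* Example (illustrative): with the table above, "-mfpu=vfp", "-mfpu=vfp9"
   and "-mfpu=vfp10" all select FPU_ARCH_VFP_V2, "-mfpu=softfpa" selects
   FPU_NONE, and "-mfpu=neon" selects FPU_ARCH_VFP_V3_PLUS_NEON_V1.  */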
struct arm_option_value_table
{
  char *name;
  long value;
};

static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif

struct arm_long_option_table
{
  char * option;		/* Substring to match.	*/
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
static int
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return 0;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	optlen = ext - str;
      else
	optlen = strlen (str);

      if (optlen == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return 0;
	}

      for (opt = arm_extensions; opt->name != NULL; opt++)
	if (strncmp (opt->name, str, optlen) == 0)
	  {
	    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    break;
	  }

      if (opt->name == NULL)
	{
	  as_bad (_("unknown architectural extension `%s'"), str);
	  return 0;
	}

      str = ext;
    }

  return 1;
}
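/* Illustrative sketch (not part of GAS, kept out of the build with #if 0):
   the "+extension" splitting technique used by arm_parse_extension above,
   shown standalone with only the C library.  The table contents here are
   hypothetical examples.  */
#if 0
#include <string.h>

struct ext_entry { const char *name; unsigned bits; };
static const struct ext_entry example_exts[] =
  { {"maverick", 1u << 0}, {"xscale", 1u << 1}, {"iwmmxt", 1u << 2}, {NULL, 0} };

/* Walk a suffix string such as "+maverick+iwmmxt": each '+'-separated name
   is isolated with strchr and compared with a length-limited strncmp, much
   as the real parser does, and the matching bits are OR-ed together.  */
static unsigned
example_parse_ext_suffixes (const char *str)
{
  unsigned flags = 0;

  while (str != NULL && *str != '\0')
    {
      const char *next;
      size_t optlen;
      const struct ext_entry *e;

      if (*str == '+')
	str++;
      next = strchr (str, '+');
      optlen = next ? (size_t) (next - str) : strlen (str);

      for (e = example_exts; e->name != NULL; e++)
	if (strlen (e->name) == optlen && strncmp (e->name, str, optlen) == 0)
	  {
	    flags |= e->bits;
	    break;
	  }

      str = next;
    }
  return flags;
}
#endif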
static int
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}
static int
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return 0;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return 1;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return 0;
}
static int
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfpu_opt = &opt->value;
	return 1;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return 0;
}

static int
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfloat_abi_opt = opt->value;
	return 1;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return 0;
}

#ifdef OBJ_ELF
static int
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return 1;
      }

  as_bad (_("unknown EABI `%s'\n"), str);
  return 0;
}
#endif
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {NULL, NULL, 0, NULL}
};
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));
#endif

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));
#endif

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
#if WARN_DEPRECATED
	      /* If the option is deprecated, tell the user.  */
	      if (lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));
#endif

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
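/* Worked example (illustrative): "-mcpu=arm926ej-s" reaches md_parse_option
   as c == 'm' with arg == "cpu=arm926ej-s".  Nothing in arm_opts or
   arm_legacy_opts matches, the "mcpu=" entry of arm_long_opts does, and
   arm_parse_cpu is then called with the remaining text "arm926ej-s".  */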
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif
}
#ifdef OBJ_ELF
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {9, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
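/* Example (illustrative): for a v5TE part the used feature set matches the
   ARM_ARCH_V5TE row, so the loop in aeabi_set_public_attributes below ends
   up reporting Tag_CPU_arch = 4; a v7-A part walks on to the ARM_ARCH_V7A
   row and reports 10.  */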
/* Set the public EABI object attributes.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);

  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      if (strncmp (p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
    }
  /* Tag_CPU_arch.  */
  elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
    elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
    elf32_arm_add_eabi_attr_int (stdoutput, 9,
	ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
  else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
	   || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
    elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
    elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
  /* Tag_NEON_arch.  */
  if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
      || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
    elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
}
/* Add the .ARM.attributes section.  */
void
arm_md_end (void)
{
  segT s;
  char *p;
  addressT addr;
  offsetT size;

  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
  size = elf32_arm_eabi_attr_size (stdoutput);
  s = subseg_new (".ARM.attributes", 0);
  bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
  addr = frag_now_fix ();
  p = frag_more (size);
  elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *) p, size);
}
#endif /* OBJ_ELF */
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
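/* Example source fragment (illustrative) exercising the directives parsed
   above; each argument is an entry from the corresponding table:

	.cpu	arm926ej-s
	.fpu	softvfp
	.arch	armv5te
	.object_arch	armv4t
*/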