/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "safe-ctype.h"
#include "opcode/arm.h"
#include "dw2gencfi.h"
#include "dwarf2dbg.h"
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */
static struct
{
  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */
  segT            saved_seg;
  subsegT         saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* The number of bytes pushed to the stack.  */
  offsetT         frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */
  offsetT         fp_offset;
  int             fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned        fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;
} unwind;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set.  This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
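
/* Illustrative sketch (not part of the build): the comment above describes
   marked_pr_dependency as a simple bitmask indexed by personality routine
   number.  A minimal use of such a mask might look like the following; the
   helper name "note_pr_dependency" is hypothetical and only for illustration.  */
#if 0
static void
note_pr_dependency (int index)
{
  if (marked_pr_dependency & (1 << index))
    return;   /* Dependency already emitted for this section.  */
  /* ... emit the R_ARM_NONE relocation against __aeabi_unwind_cpp_prN here ...  */
  marked_pr_dependency |= (1 << index);
}
#endif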
/* Results from operand parsing worker functions.  */
typedef enum
{
  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
/* Types of processor to assemble for.  */
#if defined __XSCALE__
#define CPU_DEFAULT	ARM_ARCH_XSCALE
#if defined __thumb__
#define CPU_DEFAULT	ARM_ARCH_V5T
# define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
   /* Legacy a.out format.  */
#  define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
  /* For backwards compatibility, default to FPA.  */
# define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)	(strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26      = FALSE;
static int atpcs             = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code          = FALSE;
static int fix_v4bx          = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];
#ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
#else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
#endif

static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];

bfd_boolean
arm_is_eabi (void)
{
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
}
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
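
/* Illustrative sketch (not part of the build): MODE_RECORDED is chosen to be
   distinct from every legal thumb_mode value (0, 1 or 2), so it can be OR'd
   into a frag's tc_frag_data to mean "the mode has already been recorded".
   A minimal use, assuming an integer tc_frag_data and a frag pointer F,
   might look like this.  */
#if 0
  if (!(f->tc_frag_data & MODE_RECORDED))
    f->tc_frag_data = thumb_mode | MODE_RECORDED;
#endif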
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
  enum neon_el_type type;

#define NEON_MAX_TYPE_ELS 4

  struct neon_type_el el[NEON_MAX_TYPE_ELS];

  unsigned long instruction;

  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     encoded.  */

  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */

    bfd_reloc_code_real_type type;

    struct neon_type_el vectype;
    unsigned present    : 1;  /* Operand present.  */
    unsigned isreg      : 1;  /* Operand was a register.  */
    unsigned immisreg   : 1;  /* .imm field is a second register.  */
    unsigned isscalar   : 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad     : 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc   : 1;  /* Operand has relocation suffix.  */
    unsigned writeback  : 1;  /* Operand has trailing !  */
    unsigned preind     : 1;  /* Preindexed address.  */
    unsigned postind    : 1;  /* Postindexed address.  */
    unsigned negative   : 1;  /* Index register was negated.  */
    unsigned shifted    : 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

static struct arm_it inst;

#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
389 #define CP_T_X 0x00008000
390 #define CP_T_Y 0x00400000
392 #define CONDS_BIT 0x00100000
393 #define LOAD_BIT 0x00100000
395 #define DOUBLE_LOAD_FLAG 0x00000001
399 const char * template;
403 #define COND_ALWAYS 0xE
407 const char *template;
411 struct asm_barrier_opt
413 const char *template;
417 /* The bit that distinguishes CPSR and SPSR. */
418 #define SPSR_BIT (1 << 22)
420 /* The individual PSR flag bits. */
421 #define PSR_c (1 << 16)
422 #define PSR_x (1 << 17)
423 #define PSR_s (1 << 18)
424 #define PSR_f (1 << 19)
429 bfd_reloc_code_real_type reloc
;
434 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
435 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
440 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
{
  unsigned char       defined;
  unsigned char       index;
  struct neon_type_el eltype;
};

/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
struct reg_entry
{
  const char               *name;
  unsigned char             number;
  unsigned char             type;
  unsigned char             builtin;
  struct neon_typed_alias  *neon;
};
494 /* Diagnostics used when we don't get a register of the expected type. */
495 const char *const reg_expected_msgs
[] =
497 N_("ARM register expected"),
498 N_("bad or missing co-processor number"),
499 N_("co-processor register expected"),
500 N_("FPA register expected"),
501 N_("VFP single precision register expected"),
502 N_("VFP/Neon double precision register expected"),
503 N_("Neon quad precision register expected"),
504 N_("VFP single or double precision register expected"),
505 N_("Neon double or quad precision register expected"),
506 N_("VFP single, double or Neon quad precision register expected"),
507 N_("VFP system register expected"),
508 N_("Maverick MVF register expected"),
509 N_("Maverick MVD register expected"),
510 N_("Maverick MVFX register expected"),
511 N_("Maverick MVDX register expected"),
512 N_("Maverick MVAX register expected"),
513 N_("Maverick DSPSC register expected"),
514 N_("iWMMXt data register expected"),
515 N_("iWMMXt control register expected"),
516 N_("iWMMXt scalar register expected"),
517 N_("XScale accumulator register expected"),
520 /* Some well known registers that we refer to directly elsewhere. */
525 /* ARM instructions take 4bytes in the object file, Thumb instructions
531 /* Basic string to match. */
532 const char *template;
534 /* Parameters to instruction. */
535 unsigned char operands
[8];
537 /* Conditional tag - see opcode_lookup. */
538 unsigned int tag
: 4;
540 /* Basic instruction code. */
541 unsigned int avalue
: 28;
543 /* Thumb-format instruction code. */
546 /* Which architecture variant provides this instruction. */
547 const arm_feature_set
*avariant
;
548 const arm_feature_set
*tvariant
;
550 /* Function to call to encode instruction in ARM format. */
551 void (* aencode
) (void);
553 /* Function to call to encode instruction in Thumb format. */
554 void (* tencode
) (void);
557 /* Defines for various bits that we will want to toggle. */
558 #define INST_IMMEDIATE 0x02000000
559 #define OFFSET_REG 0x02000000
560 #define HWOFFSET_IMM 0x00400000
561 #define SHIFT_BY_REG 0x00000010
562 #define PRE_INDEX 0x01000000
563 #define INDEX_UP 0x00800000
564 #define WRITE_BACK 0x00200000
565 #define LDM_TYPE_2_OR_3 0x00400000
566 #define CPSI_MMOD 0x00020000
568 #define LITERAL_MASK 0xf000f000
569 #define OPCODE_MASK 0xfe1fffff
570 #define V4_STR_BIT 0x00000020
572 #define T2_SUBS_PC_LR 0xf3de8f00
574 #define DATA_OP_SHIFT 21
576 #define T2_OPCODE_MASK 0xfe1fffff
577 #define T2_DATA_OP_SHIFT 21
579 /* Codes to distinguish the arithmetic instructions. */
590 #define OPCODE_CMP 10
591 #define OPCODE_CMN 11
592 #define OPCODE_ORR 12
593 #define OPCODE_MOV 13
594 #define OPCODE_BIC 14
595 #define OPCODE_MVN 15
597 #define T2_OPCODE_AND 0
598 #define T2_OPCODE_BIC 1
599 #define T2_OPCODE_ORR 2
600 #define T2_OPCODE_ORN 3
601 #define T2_OPCODE_EOR 4
602 #define T2_OPCODE_ADD 8
603 #define T2_OPCODE_ADC 10
604 #define T2_OPCODE_SBC 11
605 #define T2_OPCODE_SUB 13
606 #define T2_OPCODE_RSB 14
608 #define T_OPCODE_MUL 0x4340
609 #define T_OPCODE_TST 0x4200
610 #define T_OPCODE_CMN 0x42c0
611 #define T_OPCODE_NEG 0x4240
612 #define T_OPCODE_MVN 0x43c0
614 #define T_OPCODE_ADD_R3 0x1800
615 #define T_OPCODE_SUB_R3 0x1a00
616 #define T_OPCODE_ADD_HI 0x4400
617 #define T_OPCODE_ADD_ST 0xb000
618 #define T_OPCODE_SUB_ST 0xb080
619 #define T_OPCODE_ADD_SP 0xa800
620 #define T_OPCODE_ADD_PC 0xa000
621 #define T_OPCODE_ADD_I8 0x3000
622 #define T_OPCODE_SUB_I8 0x3800
623 #define T_OPCODE_ADD_I3 0x1c00
624 #define T_OPCODE_SUB_I3 0x1e00
626 #define T_OPCODE_ASR_R 0x4100
627 #define T_OPCODE_LSL_R 0x4080
628 #define T_OPCODE_LSR_R 0x40c0
629 #define T_OPCODE_ROR_R 0x41c0
630 #define T_OPCODE_ASR_I 0x1000
631 #define T_OPCODE_LSL_I 0x0000
632 #define T_OPCODE_LSR_I 0x0800
634 #define T_OPCODE_MOV_I8 0x2000
635 #define T_OPCODE_CMP_I8 0x2800
636 #define T_OPCODE_CMP_LR 0x4280
637 #define T_OPCODE_MOV_HR 0x4600
638 #define T_OPCODE_CMP_HR 0x4500
640 #define T_OPCODE_LDR_PC 0x4800
641 #define T_OPCODE_LDR_SP 0x9800
642 #define T_OPCODE_STR_SP 0x9000
643 #define T_OPCODE_LDR_IW 0x6800
644 #define T_OPCODE_STR_IW 0x6000
645 #define T_OPCODE_LDR_IH 0x8800
646 #define T_OPCODE_STR_IH 0x8000
647 #define T_OPCODE_LDR_IB 0x7800
648 #define T_OPCODE_STR_IB 0x7000
649 #define T_OPCODE_LDR_RW 0x5800
650 #define T_OPCODE_STR_RW 0x5000
651 #define T_OPCODE_LDR_RH 0x5a00
652 #define T_OPCODE_STR_RH 0x5200
653 #define T_OPCODE_LDR_RB 0x5c00
654 #define T_OPCODE_STR_RB 0x5400
656 #define T_OPCODE_PUSH 0xb400
657 #define T_OPCODE_POP 0xbc00
659 #define T_OPCODE_BRANCH 0xe000
661 #define THUMB_SIZE 2 /* Size of thumb instruction. */
662 #define THUMB_PP_PC_LR 0x0100
663 #define THUMB_LOAD_BIT 0x0800
664 #define THUMB2_LOAD_BIT 0x00100000
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode");
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
              <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  expressionS    literals[MAX_LITERAL_POOL_SIZE];
  unsigned int   next_free_entry;
  unsigned int   id;
  symbolS *      symbol;
  segT           section;
  subsegT        sub_section;
  struct literal_pool * next;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State variables for IT block handling.  */
static bfd_boolean current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

static inline int
skip_past_char (char ** str, char c)
{
  if (**str == c)
    {
      (*str)++;
      return SUCCESS;
    }
  else
    return FAIL;
}
#define skip_past_comma(str) skip_past_char (str, ',')
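
/* Illustrative sketch (not part of the build): skip_past_char and
   skip_past_comma are used as lightweight cursor helpers while parsing
   operand strings.  A typical pattern, assuming a "char *str" cursor:  */
#if 0
  if (skip_past_comma (&str) == FAIL)
    return FAIL;   /* No comma: stop parsing the operand list.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      /* ... parse the contents of a bracketed index here ...  */
    }
#endif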
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

static int
walk_no_bignums (symbolS * sp)
{
  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    {
      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
              || (symbol_get_value_expression (sp)->X_op_symbol
                  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
    }

  return 0;
}

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
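
/* Illustrative sketch (not part of the build): callers pick one of the
   GE_* modes depending on how strict the '#'/'$' prefix rule should be.
   For example, an operand that requires a literal immediate would use
   GE_IMM_PREFIX, while Neon immediate parsing uses GE_OPT_PREFIX_BIG so
   that 64-bit constants survive the bignum check in my_get_expression.  */
#if 0
  expressionS exp;

  /* Immediate operand: '#' (or '$') required unless unified syntax.  */
  if (my_get_expression (&exp, &str, GE_IMM_PREFIX))
    return FAIL;

  /* Address-like operand: no prefix expected at all.  */
  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
    return FAIL;
#endif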
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;
  segT   seg;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
                  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
        {
          inst.error = _("immediate expression requires a # prefix");
          return FAIL;
        }
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
        (*str)++;
      break;
    default: abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
    {
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
        inst.error = _("bad expression");
      return 1;
    }

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
    {
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          || (ep->X_add_symbol
              && (walk_no_bignums (ep->X_add_symbol)
                  || (ep->X_op_symbol
                      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return 0;
}
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    case 'x':
    case 'X':
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      for (i = 0; i < prec; i++)
        {
          md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
          litP += sizeof (LITTLENUM_TYPE);
        }
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
        for (i = prec - 1; i >= 0; i--)
          {
            md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
            litP += sizeof (LITTLENUM_TYPE);
          }
      else
        /* For a 4 byte float the order of elements in `words' is 1 0.
           For an 8 byte float the order is 1 0 3 2.  */
        for (i = 0; i < prec; i += 2)
          {
            md_number_to_chars (litP, (valueT) words[i + 1],
                                sizeof (LITTLENUM_TYPE));
            md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
                                (valueT) words[i], sizeof (LITTLENUM_TYPE));
            litP += 2 * sizeof (LITTLENUM_TYPE);
          }
    }

  return NULL;
}
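
/* Illustrative sketch (not part of the build): the comment above md_atof
   describes the FPA "mixed-endian" layout.  For (double) 1.1 the eight bytes
   written in little-endian mode come out word-swapped relative to a plain
   little-endian IEEE double:

     big-endian:          3f f1 99 99  99 99 99 9a
     little-endian (FPA): 99 99 f1 3f  9a 99 99 99

   The loop below demonstrates the same swap on the 32-bit halves of an IEEE
   double; it only illustrates the byte order, it is not assembler code.  */
#if 0
  unsigned char ieee[8];      /* Double in big-endian byte order.  */
  unsigned char fpa_le[8];    /* FPA little-endian in-memory image.  */
  int w, b;

  for (w = 0; w < 2; w++)     /* Keep the word order...  */
    for (b = 0; b < 4; b++)   /* ...but reverse bytes within each word.  */
      fpa_le[w * 4 + b] = ieee[w * 4 + (3 - b)];
#endif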
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
void
md_operand (expressionS * expr)
{
  if (in_my_get_expression)
    expr->X_op = O_illegal;
}

/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

static int
immediate_for_directive (int *val)
{
  expressionS exp;
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
    {
      input_line_pointer++;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }
  *val = exp.X_add_number;
  return SUCCESS;
}
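
/* Illustrative sketch (not part of the build): a directive handler would use
   immediate_for_directive roughly like this (the directive name here is
   hypothetical).  */
#if 0
static void
s_example_directive (int ignored ATTRIBUTE_UNUSED)
{
  int reg;

  if (immediate_for_directive (&reg) == FAIL)
    return;   /* Diagnostic already issued.  */

  /* ... act on REG ...  */
  demand_empty_rest_of_line ();
}
#endif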
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
{
  char *start = *ccp;
  char *p;
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
    return NULL;
  start++;
#endif
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)
    start++;
#endif

  p = start;
  if (!ISALPHA (*p) || !is_name_beginner (*p))
    return NULL;

  do
    p++;
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

  if (!reg)
    return NULL;

  *ccp = p;
  return reg;
}
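
/* Illustrative sketch (not part of the build): the contract described above
   (advance the caller's pointer only on success, return NULL otherwise, no
   diagnostics) leads to call sites of this general shape.  */
#if 0
  char *str = input_line_pointer;
  struct reg_entry *reg = arm_reg_parse_multi (&str);

  if (reg == NULL)
    {
      first_error (_(reg_expected_msgs[REG_TYPE_RN]));
      return FAIL;
    }
  /* STR now points just past the register name; REG->number, REG->type
     and (for .dn/.qn aliases) REG->neon describe what was matched.  */
#endif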
1052 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1053 enum arm_reg_type type
)
1055 /* Alternative syntaxes are accepted for a few register classes. */
1062 /* Generic coprocessor register names are allowed for these. */
1063 if (reg
&& reg
->type
== REG_TYPE_CN
)
1068 /* For backward compatibility, a bare number is valid here. */
1070 unsigned long processor
= strtoul (start
, ccp
, 10);
1071 if (*ccp
!= start
&& processor
<= 15)
1075 case REG_TYPE_MMXWC
:
1076 /* WC includes WCG. ??? I'm not sure this is true for all
1077 instructions that take WC registers. */
1078 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1089 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1090 return value is the register number or FAIL. */
1093 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1096 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1099 /* Do not allow a scalar (reg+index) to parse as a register. */
1100 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1103 if (reg
&& reg
->type
== type
)
1106 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
/* Parse a Neon type specifier.  *STR should point at the leading '.'
   character.  Does no verification at this stage that the type fits the opcode
   (a single element type, or a chain of several element types, can all be
   legally parsed by this function).

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier.  Returns SUCCESS if this was a legal
   type, FAIL if not.  */
1128 parse_neon_type (struct neon_type
*type
, char **str
)
1135 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1137 enum neon_el_type thistype
= NT_untyped
;
1138 unsigned thissize
= -1u;
1145 /* Just a size without an explicit type. */
1149 switch (TOLOWER (*ptr
))
1151 case 'i': thistype
= NT_integer
; break;
1152 case 'f': thistype
= NT_float
; break;
1153 case 'p': thistype
= NT_poly
; break;
1154 case 's': thistype
= NT_signed
; break;
1155 case 'u': thistype
= NT_unsigned
; break;
1157 thistype
= NT_float
;
1162 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1168 /* .f is an abbreviation for .f32. */
1169 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1174 thissize
= strtoul (ptr
, &ptr
, 10);
1176 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1179 as_bad (_("bad size %d in type specifier"), thissize
);
1187 type
->el
[type
->elems
].type
= thistype
;
1188 type
->el
[type
->elems
].size
= thissize
;
  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
    return FAIL;
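
/* Illustrative sketch (not part of the build): a caller hands parse_neon_type
   a cursor positioned on the leading '.' and receives the element types and
   sizes back in a struct neon_type, e.g. for a specifier such as ".s32.f32".  */
#if 0
  struct neon_type t;
  char *p = str;   /* STR points at the '.' of the specifier.  */

  if (parse_neon_type (&t, &p) == SUCCESS)
    {
      /* For ".s32.f32": t.elems == 2, t.el[0] is {NT_signed, 32},
         t.el[1] is {NT_float, 32}; P now points past the specifier.  */
    }
#endif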
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

static void
first_error (const char *err)
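
/* Illustrative sketch (not part of the build): the guard described in the
   comment above amounts to "only record the first diagnostic", roughly:  */
#if 0
static void
first_error (const char *err)
{
  if (!inst.error)
    inst.error = err;
}
#endif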
1214 /* Parse a single type, e.g. ".s32", leading period included. */
1216 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1219 struct neon_type optype
;
1223 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1225 if (optype
.elems
== 1)
1226 *vectype
= optype
.el
[0];
1229 first_error (_("only one type should be specified for operand"));
1235 first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
1253 /* Parse either a register or a scalar, with an optional type. Return the
1254 register number, and optionally fill in the actual type of the register
1255 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1256 type/index information in *TYPEINFO. */
1259 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1260 enum arm_reg_type
*rtype
,
1261 struct neon_typed_alias
*typeinfo
)
1264 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1265 struct neon_typed_alias atype
;
1266 struct neon_type_el parsetype
;
1270 atype
.eltype
.type
= NT_invtype
;
1271 atype
.eltype
.size
= -1;
1273 /* Try alternate syntax for some types of register. Note these are mutually
1274 exclusive with the Neon syntax extensions. */
1277 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1285 /* Undo polymorphism when a set of register types may be accepted. */
1286 if ((type
== REG_TYPE_NDQ
1287 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1288 || (type
== REG_TYPE_VFSD
1289 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1290 || (type
== REG_TYPE_NSDQ
1291 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1292 || reg
->type
== REG_TYPE_NQ
))
1293 || (type
== REG_TYPE_MMXWC
1294 && (reg
->type
== REG_TYPE_MMXWCG
)))
1297 if (type
!= reg
->type
)
1303 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1305 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1307 first_error (_("can't redefine type for operand"));
1310 atype
.defined
|= NTA_HASTYPE
;
1311 atype
.eltype
= parsetype
;
1314 if (skip_past_char (&str
, '[') == SUCCESS
)
1316 if (type
!= REG_TYPE_VFD
)
1318 first_error (_("only D registers may be indexed"));
1322 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1324 first_error (_("can't change index for operand"));
1328 atype
.defined
|= NTA_HASINDEX
;
1330 if (skip_past_char (&str
, ']') == SUCCESS
)
1331 atype
.index
= NEON_ALL_LANES
;
1336 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1338 if (exp
.X_op
!= O_constant
)
1340 first_error (_("constant expression required"));
1344 if (skip_past_char (&str
, ']') == FAIL
)
1347 atype
.index
= exp
.X_add_number
;
/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */
1370 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1371 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1373 struct neon_typed_alias atype
;
1375 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1380 /* Do not allow a scalar (reg+index) to parse as a register. */
1381 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1383 first_error (_("register operand expected, but got scalar"));
1388 *vectype
= atype
.eltype
;
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
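
/* Illustrative sketch (not part of the build): parse_scalar below packs a
   D-register number and a lane index into one int (reg * 16 + index), and
   these two macros unpack it again.  For example, the scalar d5[3]:  */
#if 0
  int scalar = 5 * 16 + 3;                  /* As returned by parse_scalar.  */
  int dreg   = NEON_SCALAR_REG (scalar);    /* == 5  */
  int lane   = NEON_SCALAR_INDEX (scalar);  /* == 3  */
#endif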
/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */
1403 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1407 struct neon_typed_alias atype
;
1409 reg
= parse_typed_reg_or_scalar (&str
, REG_TYPE_VFD
, NULL
, &atype
);
1411 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1414 if (atype
.index
== NEON_ALL_LANES
)
1416 first_error (_("scalar must have an index"));
1419 else if (atype
.index
>= 64 / elsize
)
1421 first_error (_("scalar index out of range"));
1426 *type
= atype
.eltype
;
1430 return reg
* 16 + atype
.index
;
1433 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1435 parse_reg_list (char ** strp
)
1437 char * str
= * strp
;
1441 /* We come back here if we get ranges concatenated by '+' or '|'. */
1456 if ((reg
= arm_reg_parse (&str
, REG_TYPE_RN
)) == FAIL
)
1458 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
1468 first_error (_("bad range in register list"));
1472 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1474 if (range
& (1 << i
))
1476 (_("Warning: duplicated register (r%d) in register list"),
1484 if (range
& (1 << reg
))
1485 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1487 else if (reg
<= cur_reg
)
1488 as_tsktsk (_("Warning: register range not in ascending order"));
1493 while (skip_past_comma (&str
) != FAIL
1494 || (in_range
= 1, *str
++ == '-'));
1499 first_error (_("missing `}'"));
1507 if (my_get_expression (&expr
, &str
, GE_NO_PREFIX
))
1510 if (expr
.X_op
== O_constant
)
1512 if (expr
.X_add_number
1513 != (expr
.X_add_number
& 0x0000ffff))
1515 inst
.error
= _("invalid register mask");
1519 if ((range
& expr
.X_add_number
) != 0)
1521 int regno
= range
& expr
.X_add_number
;
1524 regno
= (1 << regno
) - 1;
1526 (_("Warning: duplicated register (r%d) in register list"),
1530 range
|= expr
.X_add_number
;
1534 if (inst
.reloc
.type
!= 0)
1536 inst
.error
= _("expression too complex");
1540 memcpy (&inst
.reloc
.exp
, &expr
, sizeof (expressionS
));
1541 inst
.reloc
.type
= BFD_RELOC_ARM_MULTI
;
1542 inst
.reloc
.pc_rel
= 0;
1546 if (*str
== '|' || *str
== '+')
1552 while (another_range
);
1558 /* Types of registers in a list. */
1567 /* Parse a VFP register list. If the string is invalid return FAIL.
1568 Otherwise return the number of registers, and set PBASE to the first
1569 register. Parses registers of type ETYPE.
1570 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1571 - Q registers can be used to specify pairs of D registers
1572 - { } can be omitted from around a singleton register list
1573 FIXME: This is not implemented, as it would require backtracking in
1576 This could be done (the meaning isn't really ambiguous), but doesn't
1577 fit in well with the current parsing framework.
1578 - 32 D registers may be used (also true for VFPv3).
1579 FIXME: Types are ignored in these register lists, which is probably a
1583 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
)
1588 enum arm_reg_type regtype
= 0;
1592 unsigned long mask
= 0;
1597 inst
.error
= _("expecting {");
1606 regtype
= REG_TYPE_VFS
;
1611 regtype
= REG_TYPE_VFD
;
1614 case REGLIST_NEON_D
:
1615 regtype
= REG_TYPE_NDQ
;
1619 if (etype
!= REGLIST_VFP_S
)
1621 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1622 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1626 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1629 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1636 base_reg
= max_regs
;
1640 int setmask
= 1, addregs
= 1;
      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);
1644 if (new_base
== FAIL
)
1646 first_error (_(reg_expected_msgs
[regtype
]));
1650 if (new_base
>= max_regs
)
1652 first_error (_("register out of range in list"));
1656 /* Note: a value of 2 * n is returned for the register Q<n>. */
1657 if (regtype
== REG_TYPE_NQ
)
1663 if (new_base
< base_reg
)
1664 base_reg
= new_base
;
1666 if (mask
& (setmask
<< new_base
))
1668 first_error (_("invalid register list"));
1672 if ((mask
>> new_base
) != 0 && ! warned
)
1674 as_tsktsk (_("register list not in ascending order"));
1678 mask
|= setmask
<< new_base
;
1681 if (*str
== '-') /* We have the start of a range expression */
1687 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
1690 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
1694 if (high_range
>= max_regs
)
1696 first_error (_("register out of range in list"));
1700 if (regtype
== REG_TYPE_NQ
)
1701 high_range
= high_range
+ 1;
1703 if (high_range
<= new_base
)
1705 inst
.error
= _("register range not in ascending order");
1709 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
1711 if (mask
& (setmask
<< new_base
))
1713 inst
.error
= _("invalid register list");
1717 mask
|= setmask
<< new_base
;
1722 while (skip_past_comma (&str
) != FAIL
);
1726 /* Sanity check -- should have raised a parse error above. */
1727 if (count
== 0 || count
> max_regs
)
1732 /* Final test -- the registers must be consecutive. */
1734 for (i
= 0; i
< count
; i
++)
1736 if ((mask
& (1u << i
)) == 0)
1738 inst
.error
= _("non-contiguous register range");
1748 /* True if two alias types are the same. */
1751 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1759 if (a
->defined
!= b
->defined
)
1762 if ((a
->defined
& NTA_HASTYPE
) != 0
1763 && (a
->eltype
.type
!= b
->eltype
.type
1764 || a
->eltype
.size
!= b
->eltype
.size
))
1767 if ((a
->defined
& NTA_HASINDEX
) != 0
1768 && (a
->index
!= b
->index
))
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
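
/* Illustrative sketch (not part of the build): parse_neon_el_struct_list
   returns lane | ((stride - 1) << 4) | ((length - 1) << 5), and these macros
   recover the pieces.  E.g. a two-register, stride-2, all-lanes list:  */
#if 0
  int ret = NEON_ALL_LANES | ((2 - 1) << 4) | ((2 - 1) << 5);

  int lane   = NEON_LANE (ret);             /* == NEON_ALL_LANES (15)  */
  int stride = NEON_REG_STRIDE (ret);       /* == 2  */
  int length = NEON_REGLIST_LENGTH (ret);   /* == 2  */
#endif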
1787 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
1788 struct neon_type_el
*eltype
)
1795 int leading_brace
= 0;
1796 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
1798 const char *const incr_error
= "register stride must be 1 or 2";
1799 const char *const type_error
= "mismatched element/structure types in list";
1800 struct neon_typed_alias firsttype
;
1802 if (skip_past_char (&ptr
, '{') == SUCCESS
)
1807 struct neon_typed_alias atype
;
1808 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
1812 first_error (_(reg_expected_msgs
[rtype
]));
1819 if (rtype
== REG_TYPE_NQ
)
1826 else if (reg_incr
== -1)
1828 reg_incr
= getreg
- base_reg
;
1829 if (reg_incr
< 1 || reg_incr
> 2)
1831 first_error (_(incr_error
));
1835 else if (getreg
!= base_reg
+ reg_incr
* count
)
1837 first_error (_(incr_error
));
1841 if (!neon_alias_types_same (&atype
, &firsttype
))
1843 first_error (_(type_error
));
1847 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1851 struct neon_typed_alias htype
;
1852 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
1854 lane
= NEON_INTERLEAVE_LANES
;
1855 else if (lane
!= NEON_INTERLEAVE_LANES
)
1857 first_error (_(type_error
));
1862 else if (reg_incr
!= 1)
1864 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1868 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
1871 first_error (_(reg_expected_msgs
[rtype
]));
1874 if (!neon_alias_types_same (&htype
, &firsttype
))
1876 first_error (_(type_error
));
1879 count
+= hireg
+ dregs
- getreg
;
1883 /* If we're using Q registers, we can't use [] or [n] syntax. */
1884 if (rtype
== REG_TYPE_NQ
)
1890 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1894 else if (lane
!= atype
.index
)
1896 first_error (_(type_error
));
1900 else if (lane
== -1)
1901 lane
= NEON_INTERLEAVE_LANES
;
1902 else if (lane
!= NEON_INTERLEAVE_LANES
)
1904 first_error (_(type_error
));
1909 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
1911 /* No lane set by [x]. We must be interleaving structures. */
1913 lane
= NEON_INTERLEAVE_LANES
;
1916 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
1917 || (count
> 1 && reg_incr
== -1))
1919 first_error (_("error parsing element/structure list"));
1923 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
1925 first_error (_("expected }"));
1933 *eltype
= firsttype
.eltype
;
1938 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
1941 /* Parse an explicit relocation suffix on an expression. This is
1942 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1943 arm_reloc_hsh contains no entries, so this function can only
1944 succeed if there is no () after the word. Returns -1 on error,
1945 BFD_RELOC_UNUSED if there wasn't any suffix. */
1947 parse_reloc (char **str
)
1949 struct reloc_entry
*r
;
1953 return BFD_RELOC_UNUSED
;
1958 while (*q
&& *q
!= ')' && *q
!= ',')
1963 if ((r
= hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
1970 /* Directives: register aliases. */
1972 static struct reg_entry
*
1973 insert_reg_alias (char *str
, int number
, int type
)
1975 struct reg_entry
*new;
1978 if ((new = hash_find (arm_reg_hsh
, str
)) != 0)
1981 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
1983 /* Only warn about a redefinition if it's not defined as the
1985 else if (new->number
!= number
|| new->type
!= type
)
1986 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
1991 name
= xstrdup (str
);
1992 new = xmalloc (sizeof (struct reg_entry
));
1995 new->number
= number
;
1997 new->builtin
= FALSE
;
2000 if (hash_insert (arm_reg_hsh
, name
, (void *) new))
2007 insert_neon_reg_alias (char *str
, int number
, int type
,
2008 struct neon_typed_alias
*atype
)
2010 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2014 first_error (_("attempt to redefine typed alias"));
2020 reg
->neon
= xmalloc (sizeof (struct neon_typed_alias
));
2021 *reg
->neon
= *atype
;
2025 /* Look for the .req directive. This is of the form:
2027 new_register_name .req existing_register_name
2029 If we find one, or if it looks sufficiently like one that we want to
2030 handle any error here, return TRUE. Otherwise return FALSE. */
2033 create_register_alias (char * newname
, char *p
)
2035 struct reg_entry
*old
;
2036 char *oldname
, *nbuf
;
2039 /* The input scrubber ensures that whitespace after the mnemonic is
2040 collapsed to single spaces. */
2042 if (strncmp (oldname
, " .req ", 6) != 0)
2046 if (*oldname
== '\0')
2049 old
= hash_find (arm_reg_hsh
, oldname
);
2052 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2056 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2057 the desired alias name, and p points to its end. If not, then
2058 the desired alias name is in the global original_case_string. */
2059 #ifdef TC_CASE_SENSITIVE
2062 newname
= original_case_string
;
2063 nlen
= strlen (newname
);
2066 nbuf
= alloca (nlen
+ 1);
2067 memcpy (nbuf
, newname
, nlen
);
2070 /* Create aliases under the new name as stated; an all-lowercase
2071 version of the new name; and an all-uppercase version of the new
2073 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2075 for (p
= nbuf
; *p
; p
++)
2078 if (strncmp (nbuf
, newname
, nlen
))
2080 /* If this attempt to create an additional alias fails, do not bother
2081 trying to create the all-lower case alias. We will fail and issue
2082 a second, duplicate error message. This situation arises when the
2083 programmer does something like:
2086 The second .req creates the "Foo" alias but then fails to create
2087 the artificial FOO alias because it has already been created by the
2089 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2093 for (p
= nbuf
; *p
; p
++)
2096 if (strncmp (nbuf
, newname
, nlen
))
2097 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2103 /* Create a Neon typed/indexed register alias using directives, e.g.:
2108 These typed registers can be used instead of the types specified after the
2109 Neon mnemonic, so long as all operands given have types. Types can also be
2110 specified directly, e.g.:
2111 vadd d0.s32, d1.s32, d2.s32 */
2114 create_neon_reg_alias (char *newname
, char *p
)
2116 enum arm_reg_type basetype
;
2117 struct reg_entry
*basereg
;
2118 struct reg_entry mybasereg
;
2119 struct neon_type ntype
;
2120 struct neon_typed_alias typeinfo
;
2121 char *namebuf
, *nameend
;
2124 typeinfo
.defined
= 0;
2125 typeinfo
.eltype
.type
= NT_invtype
;
2126 typeinfo
.eltype
.size
= -1;
2127 typeinfo
.index
= -1;
2131 if (strncmp (p
, " .dn ", 5) == 0)
2132 basetype
= REG_TYPE_VFD
;
2133 else if (strncmp (p
, " .qn ", 5) == 0)
2134 basetype
= REG_TYPE_NQ
;
2143 basereg
= arm_reg_parse_multi (&p
);
2145 if (basereg
&& basereg
->type
!= basetype
)
2147 as_bad (_("bad type for register"));
2151 if (basereg
== NULL
)
2154 /* Try parsing as an integer. */
2155 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2156 if (exp
.X_op
!= O_constant
)
2158 as_bad (_("expression must be constant"));
2161 basereg
= &mybasereg
;
2162 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2168 typeinfo
= *basereg
->neon
;
2170 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2172 /* We got a type. */
2173 if (typeinfo
.defined
& NTA_HASTYPE
)
2175 as_bad (_("can't redefine the type of a register alias"));
2179 typeinfo
.defined
|= NTA_HASTYPE
;
2180 if (ntype
.elems
!= 1)
2182 as_bad (_("you must specify a single type only"));
2185 typeinfo
.eltype
= ntype
.el
[0];
2188 if (skip_past_char (&p
, '[') == SUCCESS
)
2191 /* We got a scalar index. */
2193 if (typeinfo
.defined
& NTA_HASINDEX
)
2195 as_bad (_("can't redefine the index of a scalar alias"));
2199 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2201 if (exp
.X_op
!= O_constant
)
2203 as_bad (_("scalar index must be constant"));
2207 typeinfo
.defined
|= NTA_HASINDEX
;
2208 typeinfo
.index
= exp
.X_add_number
;
2210 if (skip_past_char (&p
, ']') == FAIL
)
2212 as_bad (_("expecting ]"));
2217 namelen
= nameend
- newname
;
2218 namebuf
= alloca (namelen
+ 1);
2219 strncpy (namebuf
, newname
, namelen
);
2220 namebuf
[namelen
] = '\0';
2222 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2223 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2225 /* Insert name in all uppercase. */
2226 for (p
= namebuf
; *p
; p
++)
2229 if (strncmp (namebuf
, newname
, namelen
))
2230 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2231 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2233 /* Insert name in all lowercase. */
2234 for (p
= namebuf
; *p
; p
++)
2237 if (strncmp (namebuf
, newname
, namelen
))
2238 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2239 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2244 /* Should never be called, as .req goes between the alias and the
2245 register name, not at the beginning of the line. */
2247 s_req (int a ATTRIBUTE_UNUSED
)
2249 as_bad (_("invalid syntax for .req directive"));
2253 s_dn (int a ATTRIBUTE_UNUSED
)
2255 as_bad (_("invalid syntax for .dn directive"));
2259 s_qn (int a ATTRIBUTE_UNUSED
)
2261 as_bad (_("invalid syntax for .qn directive"));
2264 /* The .unreq directive deletes an alias which was previously defined
2265 by .req. For example:
2271 s_unreq (int a ATTRIBUTE_UNUSED
)
2276 name
= input_line_pointer
;
2278 while (*input_line_pointer
!= 0
2279 && *input_line_pointer
!= ' '
2280 && *input_line_pointer
!= '\n')
2281 ++input_line_pointer
;
2283 saved_char
= *input_line_pointer
;
2284 *input_line_pointer
= 0;
2287 as_bad (_("invalid syntax for .unreq directive"));
2290 struct reg_entry
*reg
= hash_find (arm_reg_hsh
, name
);
2293 as_bad (_("unknown register alias '%s'"), name
);
2294 else if (reg
->builtin
)
2295 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2302 hash_delete (arm_reg_hsh
, name
, FALSE
);
2303 free ((char *) reg
->name
);
2308 /* Also locate the all upper case and all lower case versions.
2309 Do not complain if we cannot find one or the other as it
2310 was probably deleted above. */
2312 nbuf
= strdup (name
);
2313 for (p
= nbuf
; *p
; p
++)
2315 reg
= hash_find (arm_reg_hsh
, nbuf
);
2318 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2319 free ((char *) reg
->name
);
2325 for (p
= nbuf
; *p
; p
++)
2327 reg
= hash_find (arm_reg_hsh
, nbuf
);
2330 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2331 free ((char *) reg
->name
);
2341 *input_line_pointer
= saved_char
;
2342 demand_empty_rest_of_line ();
2345 /* Directives: Instruction set selection. */
/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */

static enum mstate mapstate = MAP_UNDEFINED;
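
/* Illustrative sketch (not part of the build): every path that is about to
   emit ARM code, Thumb code or data calls mapping_state first, so that a $a,
   $t or $d mapping symbol marks each transition exactly once per region.  */
#if 0
  mapping_state (MAP_ARM);   /* About to emit A32 instructions.  */
  /* ... emit instruction bytes ...  */
  mapping_state (MAP_DATA);  /* A literal pool or .word data follows.  */
#endif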
2356 mapping_state (enum mstate state
)
2359 const char * symname
;
2362 if (mapstate
== state
)
2363 /* The mapping symbol has already been emitted.
2364 There is nothing else to do. */
2373 type
= BSF_NO_FLAGS
;
2377 type
= BSF_NO_FLAGS
;
2381 type
= BSF_NO_FLAGS
;
2389 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2391 symbolP
= symbol_new (symname
, now_seg
, (valueT
) frag_now_fix (), frag_now
);
2392 symbol_table_insert (symbolP
);
2393 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2398 THUMB_SET_FUNC (symbolP
, 0);
2399 ARM_SET_THUMB (symbolP
, 0);
2400 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2404 THUMB_SET_FUNC (symbolP
, 1);
2405 ARM_SET_THUMB (symbolP
, 1);
2406 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2415 #define mapping_state(x) /* nothing */
2418 /* Find the real, Thumb encoded start of a Thumb function. */
2422 find_real_start (symbolS
* symbolP
)
2425 const char * name
= S_GET_NAME (symbolP
);
2426 symbolS
* new_target
;
2428 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2429 #define STUB_NAME ".real_start_of"
2434 /* The compiler may generate BL instructions to local labels because
2435 it needs to perform a branch to a far away location. These labels
2436 do not have a corresponding ".real_start_of" label. We check
2437 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2438 the ".real_start_of" convention for nonlocal branches. */
2439 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2442 real_start
= ACONCAT ((STUB_NAME
, name
, NULL
));
2443 new_target
= symbol_find (real_start
);
2445 if (new_target
== NULL
)
2447 as_warn (_("Failed to find real start of function: %s\n"), name
);
2448 new_target
= symbolP
;
2456 opcode_select (int width
)
2463 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2464 as_bad (_("selected processor does not support THUMB opcodes"));
2467 /* No need to force the alignment, since we will have been
2468 coming from ARM mode, which is word-aligned. */
2469 record_alignment (now_seg
, 1);
2471 mapping_state (MAP_THUMB
);
2477 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2478 as_bad (_("selected processor does not support ARM opcodes"));
2483 frag_align (2, 0, 0);
2485 record_alignment (now_seg
, 1);
2487 mapping_state (MAP_ARM
);
2491 as_bad (_("invalid instruction size selected (%d)"), width
);
2496 s_arm (int ignore ATTRIBUTE_UNUSED
)
2499 demand_empty_rest_of_line ();
2503 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2506 demand_empty_rest_of_line ();
2510 s_code (int unused ATTRIBUTE_UNUSED
)
2514 temp
= get_absolute_expression ();
2519 opcode_select (temp
);
2523 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2528 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2530 /* If we are not already in thumb mode go into it, EVEN if
2531 the target processor does not support thumb instructions.
2532 This is used by gcc/config/arm/lib1funcs.asm for example
2533 to compile interworking support functions even if the
2534 target processor should not support interworking. */
2538 record_alignment (now_seg
, 1);
2541 demand_empty_rest_of_line ();
2545 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2549 /* The following label is the name/address of the start of a Thumb function.
2550 We need to know this for the interworking support. */
2551 label_is_thumb_function_name
= TRUE
;
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  */

s_thumb_set (int equiv)
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that ...  */

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!  */
  name = input_line_pointer;
  delim = get_symbol_end ();
  end_name = input_line_pointer;

  if (*input_line_pointer != ',')
      as_bad (_("expected comma after name \"%s\""), name);
      ignore_rest_of_line ();

  input_line_pointer++;

  if (name[0] == '.' && name[1] == '\0')
      /* XXX - this should not happen to .thumb_set.  */

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info ...  */
      if (listing & LISTING_SYMBOLS)
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;

	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    } /* Make a new symbol.  */

  symbol_table_insert (symbolP);

      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */

s_syntax (int unused ATTRIBUTE_UNUSED)
  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;

      as_bad (_("unrecognized syntax mode \"%s\""), name);

  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
/* Directives: sectioning and alignment.  */

/* Same as s_align_ptwo but align 0 => align 2.  */

s_align (int unused ATTRIBUTE_UNUSED)
  long max_alignment = 15;

  temp = get_absolute_expression ();
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);

    as_bad (_("alignment negative. 0 assumed."));

  if (*input_line_pointer == ',')
      input_line_pointer++;
      temp_fill = get_absolute_expression ();

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
      if (!fill_p && subseg_text_p (now_seg))
	frag_align_code (temp, 0);

	frag_align (temp, (int) temp_fill, 0);

  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);

s_bss (int ignore ATTRIBUTE_UNUSED)
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();
  mapping_state (MAP_DATA);

s_even (int ignore ATTRIBUTE_UNUSED)
  /* Never make frag if expect extra pass.  */
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
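
/* Note (illustrative summary only, not restated from the code above): the
   mapping_state (MAP_ARM / MAP_THUMB / MAP_DATA) calls in these directives
   drive the ARM ELF mapping symbols ($a, $t and $d), which mark where a
   section switches between ARM code, Thumb code and data so that
   disassemblers and the linker can tell the regions apart.  See
   mapping_state itself for the exact rules.  */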
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg)

static literal_pool *
find_or_make_literal_pool (void)
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));

      pool->next_free_entry = 0;
      pool->section         = now_seg;
      pool->sub_section     = now_subseg;
      pool->next            = list_of_pools;
      pool->symbol          = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num++;
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */

add_to_lit_pool (void)
  literal_pool * pool;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry++)
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
      if (entry >= MAX_LITERAL_POOL_SIZE)
	  inst.error = _("literal pool overflow");

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;

  inst.reloc.exp.X_op         = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;
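
/* Roughly how this plays out (illustrative example, not taken from the code
   above): for "ldr r0, =0x12345678" the constant is stashed in the current
   section's pool and the instruction's expression is rewritten to
   pool->symbol + entry * 4, i.e. a PC-relative load from the pool.  A later
   .ltorg / .pool directive (s_ltorg below) places the pool symbol and emits
   each entry as a .word, at which point the load resolves to the right
   slot.  */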
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT         segment,	/* Segment identifier (SEG_<something>).  */
	       valueT       valu,	/* Symbol value.  */
	       fragS *      frag)	/* Associated fragment.  */
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
    extern int symbol_table_frozen;

    if (symbol_table_frozen)

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);

  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS */
s_ltorg (int ignored ATTRIBUTE_UNUSED)
  literal_pool * pool;

  pool = find_literal_pool ();
      || pool->symbol == NULL
      || pool->next_free_entry == 0)

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);

  for (entry = 0; entry < pool->next_free_entry; entry++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
/* Forward declarations for functions below, in the MD interface
   section.  */
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
static valueT create_unwind_entry (int);
static void start_unwind_section (const segT, int);
static void add_unwind_opcode (valueT, int);
static void flush_pending_unwind (void);

/* Directives: Data.  */
s_arm_elf_cons (int nbytes)
#ifdef md_flush_pending_output
  md_flush_pending_output ();

  if (is_it_end_of_statement ())
      demand_empty_rest_of_line ();

#ifdef md_cons_align
  md_cons_align (nbytes);

  mapping_state (MAP_DATA);

      char *base = input_line_pointer;

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);

	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);

	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();

	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);

	      reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;

		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);

		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;

		  char *save_buf = alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer - before_reloc);

		  memcpy (base, save_buf, p - base);

		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, reloc);

  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
/* Parse a .rel31 directive.  */

s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();

#ifdef md_cons_align

  mapping_state (MAP_DATA);

  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
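
/* In other words (summary, not part of the original sources): ".rel31 1, expr"
   reserves a word whose top bit is taken from the first operand, and attaches
   a BFD_RELOC_ARM_PREL31 fix so the low 31 bits end up holding the
   position-relative value of expr.  */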
/* Directives: AEABI stack-unwind tables.  */

/* Parse an unwind_fnstart directive.  Simply records the current location.  */

s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
  demand_empty_rest_of_line ();
  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;

  unwind.sp_restored = 0;
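
/* For orientation (illustrative only, not from the original sources), a
   function using these directives typically looks like:

	.fnstart
	.save	{r4, r5, lr}	@ -> s_arm_unwind_save
	.setfp	fp, sp, #8	@ -> s_arm_unwind_setfp
	...
	.fnend			@ emits the index table entry

   with .handlerdata, .personality / .personalityindex or .cantunwind
   slotting in before .fnend when needed.  */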
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  */

s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
  demand_empty_rest_of_line ();
  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  create_unwind_entry (1);
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
  demand_empty_rest_of_line ();

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
      static const char *const name[] =
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      marked_pr_dependency |= 1 << unwind.personality_index;
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	= marked_pr_dependency;

    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);

    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);
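
/* Layout reminder (summary, not restated from the code above): each index
   table entry emitted here is two words in the exception index section -
   word 0 is a PREL31 offset to the function start, and word 1 is either a
   PREL31 offset to the table entry generated by create_unwind_entry or the
   inline / "can't unwind" encoding returned as VAL.  */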
/* Parse an unwind_cantunwind directive.  */

s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
  demand_empty_rest_of_line ();
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  unwind.personality_index = -2;

/* Parse a personalityindex directive.  */

s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
/* Parse a personality directive.  */

s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);

  demand_empty_rest_of_line ();
/* Parse a directive saving core registers.  */

s_arm_unwind_save_core (void)
  range = parse_reg_list (&input_line_pointer);
      as_bad (_("expected register list"));
      ignore_rest_of_line ();

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;

  /* See if we can use the short opcodes.  These pop a block of up to 8
     registers starting with r4, plus maybe r14.  */
  for (n = 0; n < 8; n++)
      /* Break at the first non-saved register.  */
      if ((range & (1 << (n + 4))) == 0)

  /* See if there are any other bits set.  */
  if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
      /* Use the long form.  */
      op = 0x8000 | ((range >> 4) & 0xfff);
      add_unwind_opcode (op, 2);

      /* Use the short form.  */
	op = 0xa8; /* Pop r14.  */
	op = 0xa0; /* Do not pop r14.  */
      add_unwind_opcode (op, 1);

      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
      if (range & (1 << n))
	unwind.frame_size += 4;
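
/* Opcode cheat-sheet for the encodings used above (per the ARM EHABI unwind
   opcode tables; listed here for convenience only):
     0xA0..0xA7        pop r4-r[4+n]
     0xA8..0xAF        pop r4-r[4+n] and r14
     0x80 0xnn 0xnn    pop core registers r4-r15 under a 12-bit mask
     0xB1 0x0n         pop r0-r3 under a 4-bit mask  */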
/* Parse a directive saving FPA registers.  */

s_arm_unwind_save_fpa (int reg)
  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();

  demand_empty_rest_of_line ();

      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);

      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);

  unwind.frame_size += num_regs * 12;
/* Parse a directive saving VFP registers for ARMv6 and above.  */

s_arm_unwind_save_vfp_armv6 (void)
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
      as_bad (_("expected register list"));
      ignore_rest_of_line ();

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);

  unwind.frame_size += count * 8;
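
/* For reference (per the ARM EHABI opcode tables, not restated from the code
   above): 0xC9 ssss/cccc pops VFP double registers D[ssss]..D[ssss+cccc] and
   0xC8 ssss/cccc pops D[16+ssss]..D[16+ssss+cccc], both with FSTMD-style
   layout, which is why the register range is split at D16 above.  */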
/* Parse a directive saving VFP registers for pre-ARMv6.  */

s_arm_unwind_save_vfp (void)
  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
      as_bad (_("expected register list"));
      ignore_rest_of_line ();

  demand_empty_rest_of_line ();

      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);

      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);

  unwind.frame_size += count * 8 + 4;
/* Parse a directive saving iWMMXt data registers.  */

s_arm_unwind_save_mmxwr (void)
  if (*input_line_pointer == '{')
    input_line_pointer++;

      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));

	as_tsktsk (_("register list not in ascending order"));

      if (*input_line_pointer == '-')
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  else if (reg >= hi_reg)
	      as_bad (_("bad register range"));
	  for (; reg < hi_reg; reg++)
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at ...  */
  flush_pending_unwind ();

  for (i = 0; i < 16; i++)
      if (mask & (1 << i))
	unwind.frame_size += 8;

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of ...  */
  if (unwind.opcode_count > 0)
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	  /* Only merge if the blocks are contiguous.  */
	      if ((mask & 0xfe00) == (1 << 9))
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
	  else if (i == 6 && unwind.opcode_count >= 2)
	      i = unwind.opcodes[unwind.opcode_count - 2];

	      op = 0xffff << (reg - 1);
		  && ((mask & op) == (1u << (reg - 1))))
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  unwind.opcode_count -= 2;

  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
      /* Save registers in blocks.  */
	  || !(mask & (1 << reg)))
	  /* We found an unsaved reg.  Generate opcodes to save the ...  */
	      op = 0xc0 | (hi_reg - 10);
	      add_unwind_opcode (op, 1);

	      op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
	      add_unwind_opcode (op, 2);

  ignore_rest_of_line ();
s_arm_unwind_save_mmxwcg (void)
  if (*input_line_pointer == '{')
    input_line_pointer++;

      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));

	as_tsktsk (_("register list not in ascending order"));

      if (*input_line_pointer == '-')
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  else if (reg >= hi_reg)
	      as_bad (_("bad register range"));
	  for (; reg < hi_reg; reg++)
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at ...  */
  flush_pending_unwind ();

  for (reg = 0; reg < 16; reg++)
      if (mask & (1 << reg))
	unwind.frame_size += 4;

  add_unwind_opcode (op, 2);

  ignore_rest_of_line ();
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.  */

s_arm_unwind_save (int arch_v6)
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  reg = arm_reg_parse_multi (&peek);
      as_bad (_("register expected"));
      ignore_rest_of_line ();

      as_bad (_("FPA .unwind_save does not take a register list"));
      ignore_rest_of_line ();

      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);

    case REG_TYPE_RN:	  s_arm_unwind_save_core ();   return;
	s_arm_unwind_save_vfp_armv6 ();
	s_arm_unwind_save_vfp ();
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;

      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
/* Parse an unwind_movsp directive.  */

s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
      if (immediate_for_directive (&offset) == FAIL)

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));

  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
/* Parse an unwind_pad directive.  */

s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
  if (immediate_for_directive (&offset) == FAIL)

      as_bad (_("stack increment must be multiple of 4"));
      ignore_rest_of_line ();

  /* Don't generate any opcodes, just record the details for later.  */
  unwind.frame_size += offset;
  unwind.pending_offset += offset;

  demand_empty_rest_of_line ();
/* Parse an unwind_setfp directive.  */

s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (skip_past_comma (&input_line_pointer) == FAIL)

  sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);

  if (fp_reg == FAIL || sp_reg == FAIL)
      as_bad (_("expected <reg>, <reg>"));
      ignore_rest_of_line ();

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
      if (immediate_for_directive (&offset) == FAIL)

  demand_empty_rest_of_line ();

  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
      as_bad (_("register must be either sp or set by a previous "
		"unwind_movsp directive"));

  /* Don't generate any opcodes, just record the information for later.  */
  unwind.fp_reg = fp_reg;

  if (sp_reg == REG_SP)
    unwind.fp_offset = unwind.frame_size - offset;

    unwind.fp_offset -= offset;
/* Parse an unwind_raw directive.  */

s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
  /* This is an arbitrary limit.  */
  unsigned char op[16];

  if (exp.X_op == O_constant
      && skip_past_comma (&input_line_pointer) != FAIL)
      unwind.frame_size += exp.X_add_number;

      exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
      as_bad (_("expected <offset>, <opcode>"));
      ignore_rest_of_line ();

  /* Parse the opcode.  */
	  as_bad (_("unwind opcode too long"));
	  ignore_rest_of_line ();

      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
	  as_bad (_("invalid unwind opcode"));
	  ignore_rest_of_line ();

      op[count++] = exp.X_add_number;

      /* Parse the next byte.  */
      if (skip_past_comma (&input_line_pointer) == FAIL)

  /* Add the opcode bytes in reverse order.  */
    add_unwind_opcode (op[count], 1);

  demand_empty_rest_of_line ();
/* Parse a .eabi_attribute directive.  */

s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
  int tag = s_vendor_attribute (OBJ_ATTR_PROC);

  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
    attributes_set_explicitly[tag] = 1;
#endif /* OBJ_ELF */

static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);

pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);

  while (*input_line_pointer++ == ',');

  input_line_pointer--;
  demand_empty_rest_of_line ();
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
  /* Never called because '.req' does not start a line.  */
  { "req",         s_req,         0 },
  /* Following two are likewise never called.  */
  { "unreq",       s_unreq,       0 },
  { "bss",         s_bss,         0 },
  { "align",       s_align,       0 },
  { "arm",         s_arm,         0 },
  { "thumb",       s_thumb,       0 },
  { "code",        s_code,        0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,   0 },
  { "even",        s_even,        0 },
  { "ltorg",       s_ltorg,       0 },
  { "pool",        s_ltorg,       0 },
  { "syntax",      s_syntax,      0 },
  { "cpu",         s_arm_cpu,     0 },
  { "arch",        s_arm_arch,    0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",         s_arm_fpu,     0 },
  { "word",        s_arm_elf_cons, 4 },
  { "long",        s_arm_elf_cons, 4 },
  { "rel31",       s_arm_rel31,   0 },
  { "fnstart",     s_arm_unwind_fnstart, 0 },
  { "fnend",       s_arm_unwind_fnend, 0 },
  { "cantunwind",  s_arm_unwind_cantunwind, 0 },
  { "personality", s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata", s_arm_unwind_handlerdata, 0 },
  { "save",        s_arm_unwind_save, 0 },
  { "vsave",       s_arm_unwind_save, 1 },
  { "movsp",       s_arm_unwind_movsp, 0 },
  { "pad",         s_arm_unwind_pad, 0 },
  { "setfp",       s_arm_unwind_setfp, 0 },
  { "unwind_raw",  s_arm_unwind_raw, 0 },
  { "eabi_attribute", s_arm_eabi_attribute, 0 },

  /* These are used for dwarf.  */

  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },

  { "extend",  float_cons, 'x' },
  { "ldouble", float_cons, 'x' },
  { "packed",  float_cons, 'p' },

  { "secrel32", pe_directive_secrel, 0 },
/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
      inst.error = _("constant expression required");

  if (exp.X_add_number < min || exp.X_add_number > max)
      inst.error = _("immediate value out of range");

  *val = exp.X_add_number;
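
/* Typical use elsewhere in this file (for example when parsing the
   "[Rn], {option}" addressing form): parse_immediate (&p, &val, 0, 255, TRUE)
   accepts "200" or "#200" (the '#' is optional because PREFIX_OPT is TRUE)
   and rejects anything outside 0..255 with the errors above.  */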
4028 /* Less-generic immediate-value read function with the possibility of loading a
4029 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4030 instructions. Puts the result directly in inst.operands[i]. */
4033 parse_big_immediate (char **str
, int i
)
4038 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4040 if (exp
.X_op
== O_constant
)
4042 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4043 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4044 O_constant. We have to be careful not to break compilation for
4045 32-bit X_add_number, though. */
4046 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4048 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4049 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4050 inst
.operands
[i
].regisimm
= 1;
4053 else if (exp
.X_op
== O_big
4054 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
4055 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
4057 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4058 /* Bignums have their least significant bits in
4059 generic_bignum[0]. Make sure we put 32 bits in imm and
4060 32 bits in reg, in a (hopefully) portable way. */
4061 assert (parts
!= 0);
4062 inst
.operands
[i
].imm
= 0;
4063 for (j
= 0; j
< parts
; j
++, idx
++)
4064 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4065 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4066 inst
.operands
[i
].reg
= 0;
4067 for (j
= 0; j
< parts
; j
++, idx
++)
4068 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4069 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4070 inst
.operands
[i
].regisimm
= 1;
4080 /* Returns the pseudo-register number of an FPA immediate constant,
4081 or FAIL if there isn't a valid constant here. */
4084 parse_fpa_immediate (char ** str
)
4086 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4092 /* First try and match exact strings, this is to guarantee
4093 that some formats will work even for cross assembly. */
4095 for (i
= 0; fp_const
[i
]; i
++)
4097 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4101 *str
+= strlen (fp_const
[i
]);
4102 if (is_end_of_line
[(unsigned char) **str
])
4108 /* Just because we didn't get a match doesn't mean that the constant
4109 isn't valid, just that it is in a format that we don't
4110 automatically recognize. Try parsing it with the standard
4111 expression routines. */
4113 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4115 /* Look for a raw floating point number. */
4116 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4117 && is_end_of_line
[(unsigned char) *save_in
])
4119 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4121 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4123 if (words
[j
] != fp_values
[i
][j
])
4127 if (j
== MAX_LITTLENUMS
)
4135 /* Try and parse a more complex expression, this will probably fail
4136 unless the code uses a floating point prefix (eg "0f"). */
4137 save_in
= input_line_pointer
;
4138 input_line_pointer
= *str
;
4139 if (expression (&exp
) == absolute_section
4140 && exp
.X_op
== O_big
4141 && exp
.X_add_number
< 0)
4143 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4145 if (gen_to_words (words
, 5, (long) 15) == 0)
4147 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4149 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4151 if (words
[j
] != fp_values
[i
][j
])
4155 if (j
== MAX_LITTLENUMS
)
4157 *str
= input_line_pointer
;
4158 input_line_pointer
= save_in
;
4165 *str
= input_line_pointer
;
4166 input_line_pointer
= save_in
;
4167 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

is_quarter_float (unsigned imm)
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
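
/* Sanity-check example (not from the original sources): 1.0f is 0x3F800000,
   whose low 19 bits are zero and whose exponent bits match the required
   pattern, so is_quarter_float (0x3F800000) returns 1 and the value can be
   encoded as an 8-bit VMOV float immediate; 0x3F800001 fails the low-bits
   test and cannot.  */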
4181 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4182 0baBbbbbbc defgh000 00000000 00000000.
4183 The zero and minus-zero cases need special handling, since they can't be
4184 encoded in the "quarter-precision" float format, but can nonetheless be
4185 loaded as integer constants. */
4188 parse_qfloat_immediate (char **ccp
, int *immed
)
4192 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4193 int found_fpchar
= 0;
4195 skip_past_char (&str
, '#');
4197 /* We must not accidentally parse an integer as a floating-point number. Make
4198 sure that the value we parse is not an integer by checking for special
4199 characters '.' or 'e'.
4200 FIXME: This is a horrible hack, but doing better is tricky because type
4201 information isn't in a very usable state at parse time. */
4203 skip_whitespace (fpnum
);
4205 if (strncmp (fpnum
, "0x", 2) == 0)
4209 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4210 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4220 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4222 unsigned fpword
= 0;
4225 /* Our FP word must be 32 bits (single-precision FP). */
4226 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4228 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4232 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
4245 /* Shift operands. */
4248 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4251 struct asm_shift_name
4254 enum shift_kind kind
;
4257 /* Third argument to parse_shift. */
4258 enum parse_shift_mode
4260 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4261 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4262 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4263 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4264 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4267 /* Parse a <shift> specifier on an ARM data processing instruction.
4268 This has three forms:
4270 (LSL|LSR|ASL|ASR|ROR) Rs
4271 (LSL|LSR|ASL|ASR|ROR) #imm
4274 Note that ASL is assimilated to LSL in the instruction encoding, and
4275 RRX to ROR #0 (which cannot be written as such). */
4278 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4280 const struct asm_shift_name
*shift_name
;
4281 enum shift_kind shift
;
4286 for (p
= *str
; ISALPHA (*p
); p
++)
4291 inst
.error
= _("shift expression expected");
4295 shift_name
= hash_find_n (arm_shift_hsh
, *str
, p
- *str
);
4297 if (shift_name
== NULL
)
4299 inst
.error
= _("shift expression expected");
4303 shift
= shift_name
->kind
;
4307 case NO_SHIFT_RESTRICT
:
4308 case SHIFT_IMMEDIATE
: break;
4310 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4311 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4313 inst
.error
= _("'LSL' or 'ASR' required");
4318 case SHIFT_LSL_IMMEDIATE
:
4319 if (shift
!= SHIFT_LSL
)
4321 inst
.error
= _("'LSL' required");
4326 case SHIFT_ASR_IMMEDIATE
:
4327 if (shift
!= SHIFT_ASR
)
4329 inst
.error
= _("'ASR' required");
4337 if (shift
!= SHIFT_RRX
)
4339 /* Whitespace can appear here if the next thing is a bare digit. */
4340 skip_whitespace (p
);
4342 if (mode
== NO_SHIFT_RESTRICT
4343 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4345 inst
.operands
[i
].imm
= reg
;
4346 inst
.operands
[i
].immisreg
= 1;
4348 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4351 inst
.operands
[i
].shift_kind
= shift
;
4352 inst
.operands
[i
].shifted
= 1;
4357 /* Parse a <shifter_operand> for an ARM data processing instruction:
4360 #<immediate>, <rotate>
4364 where <shift> is defined by parse_shift above, and <rotate> is a
4365 multiple of 2 between 0 and 30. Validation of immediate operands
4366 is deferred to md_apply_fix. */
4369 parse_shifter_operand (char **str
, int i
)
4374 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
4376 inst
.operands
[i
].reg
= value
;
4377 inst
.operands
[i
].isreg
= 1;
4379 /* parse_shift will override this if appropriate */
4380 inst
.reloc
.exp
.X_op
= O_constant
;
4381 inst
.reloc
.exp
.X_add_number
= 0;
4383 if (skip_past_comma (str
) == FAIL
)
4386 /* Shift operation on register. */
4387 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
4390 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_IMM_PREFIX
))
4393 if (skip_past_comma (str
) == SUCCESS
)
4395 /* #x, y -- ie explicit rotation by Y. */
4396 if (my_get_expression (&expr
, str
, GE_NO_PREFIX
))
4399 if (expr
.X_op
!= O_constant
|| inst
.reloc
.exp
.X_op
!= O_constant
)
4401 inst
.error
= _("constant expression expected");
4405 value
= expr
.X_add_number
;
4406 if (value
< 0 || value
> 30 || value
% 2 != 0)
4408 inst
.error
= _("invalid rotation");
4411 if (inst
.reloc
.exp
.X_add_number
< 0 || inst
.reloc
.exp
.X_add_number
> 255)
4413 inst
.error
= _("invalid constant");
4417 /* Convert to decoded value. md_apply_fix will put it back. */
4418 inst
.reloc
.exp
.X_add_number
4419 = (((inst
.reloc
.exp
.X_add_number
<< (32 - value
))
4420 | (inst
.reloc
.exp
.X_add_number
>> value
)) & 0xffffffff);
4423 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
4424 inst
.reloc
.pc_rel
= 0;
4428 /* Group relocation information. Each entry in the table contains the
4429 textual name of the relocation as may appear in assembler source
4430 and must end with a colon.
4431 Along with this textual name are the relocation codes to be used if
4432 the corresponding instruction is an ALU instruction (ADD or SUB only),
4433 an LDR, an LDRS, or an LDC. */
4435 struct group_reloc_table_entry
4446 /* Varieties of non-ALU group relocation. */
4453 static struct group_reloc_table_entry group_reloc_table
[] =
4454 { /* Program counter relative: */
4456 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4461 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4462 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4463 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4464 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4466 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4471 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4472 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4473 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4474 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4476 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4477 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4478 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4479 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4480 /* Section base relative */
4482 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4487 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4488 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4489 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4490 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4492 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4497 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4498 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4499 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4500 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4502 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4503 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4504 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4505 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
4507 /* Given the address of a pointer pointing to the textual name of a group
4508 relocation as may appear in assembler source, attempt to find its details
4509 in group_reloc_table. The pointer will be updated to the character after
4510 the trailing colon. On failure, FAIL will be returned; SUCCESS
4511 otherwise. On success, *entry will be updated to point at the relevant
4512 group_reloc_table entry. */
4515 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
4518 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
4520 int length
= strlen (group_reloc_table
[i
].name
);
4522 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
4523 && (*str
)[length
] == ':')
4525 *out
= &group_reloc_table
[i
];
4526 *str
+= (length
+ 1);
4534 /* Parse a <shifter_operand> for an ARM data processing instruction
4535 (as for parse_shifter_operand) where group relocations are allowed:
4538 #<immediate>, <rotate>
4539 #:<group_reloc>:<expression>
4543 where <group_reloc> is one of the strings defined in group_reloc_table.
4544 The hashes are optional.
4546 Everything else is as for parse_shifter_operand. */
4548 static parse_operand_result
4549 parse_shifter_operand_group_reloc (char **str
, int i
)
4551 /* Determine if we have the sequence of characters #: or just :
4552 coming next. If we do, then we check for a group relocation.
4553 If we don't, punt the whole lot to parse_shifter_operand. */
4555 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4556 || (*str
)[0] == ':')
4558 struct group_reloc_table_entry
*entry
;
4560 if ((*str
)[0] == '#')
4565 /* Try to parse a group relocation. Anything else is an error. */
4566 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4568 inst
.error
= _("unknown group relocation");
4569 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4572 /* We now have the group relocation table entry corresponding to
4573 the name in the assembler source. Next, we parse the expression. */
4574 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4575 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4577 /* Record the relocation type (always the ALU variant here). */
4578 inst
.reloc
.type
= entry
->alu_code
;
4579 assert (inst
.reloc
.type
!= 0);
4581 return PARSE_OPERAND_SUCCESS
;
4584 return parse_shifter_operand (str
, i
) == SUCCESS
4585 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4587 /* Never reached. */
4590 /* Parse all forms of an ARM address expression. Information is written
4591 to inst.operands[i] and/or inst.reloc.
4593 Preindexed addressing (.preind=1):
4595 [Rn, #offset] .reg=Rn .reloc.exp=offset
4596 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4597 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4598 .shift_kind=shift .reloc.exp=shift_imm
4600 These three may have a trailing ! which causes .writeback to be set also.
4602 Postindexed addressing (.postind=1, .writeback=1):
4604 [Rn], #offset .reg=Rn .reloc.exp=offset
4605 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4606 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4607 .shift_kind=shift .reloc.exp=shift_imm
4609 Unindexed addressing (.preind=0, .postind=0):
4611 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4615 [Rn]{!} shorthand for [Rn,#0]{!}
4616 =immediate .isreg=0 .reloc.exp=immediate
4617 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4619 It is the caller's responsibility to check for addressing modes not
4620 supported by the instruction, and to set inst.reloc.type. */
4622 static parse_operand_result
4623 parse_address_main (char **str
, int i
, int group_relocations
,
4624 group_reloc_type group_type
)
4629 if (skip_past_char (&p
, '[') == FAIL
)
4631 if (skip_past_char (&p
, '=') == FAIL
)
4633 /* bare address - translate to PC-relative offset */
4634 inst
.reloc
.pc_rel
= 1;
4635 inst
.operands
[i
].reg
= REG_PC
;
4636 inst
.operands
[i
].isreg
= 1;
4637 inst
.operands
[i
].preind
= 1;
4639 /* else a load-constant pseudo op, no special treatment needed here */
4641 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4642 return PARSE_OPERAND_FAIL
;
4645 return PARSE_OPERAND_SUCCESS
;
4648 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
4650 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
4651 return PARSE_OPERAND_FAIL
;
4653 inst
.operands
[i
].reg
= reg
;
4654 inst
.operands
[i
].isreg
= 1;
4656 if (skip_past_comma (&p
) == SUCCESS
)
4658 inst
.operands
[i
].preind
= 1;
4661 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4663 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4665 inst
.operands
[i
].imm
= reg
;
4666 inst
.operands
[i
].immisreg
= 1;
4668 if (skip_past_comma (&p
) == SUCCESS
)
4669 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4670 return PARSE_OPERAND_FAIL
;
4672 else if (skip_past_char (&p
, ':') == SUCCESS
)
4674 /* FIXME: '@' should be used here, but it's filtered out by generic
4675 code before we get to see it here. This may be subject to
4678 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
4679 if (exp
.X_op
!= O_constant
)
4681 inst
.error
= _("alignment must be constant");
4682 return PARSE_OPERAND_FAIL
;
4684 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
4685 inst
.operands
[i
].immisalign
= 1;
4686 /* Alignments are not pre-indexes. */
4687 inst
.operands
[i
].preind
= 0;
4691 if (inst
.operands
[i
].negative
)
4693 inst
.operands
[i
].negative
= 0;
4697 if (group_relocations
4698 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
4700 struct group_reloc_table_entry
*entry
;
4702 /* Skip over the #: or : sequence. */
4708 /* Try to parse a group relocation. Anything else is an
4710 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
4712 inst
.error
= _("unknown group relocation");
4713 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4716 /* We now have the group relocation table entry corresponding to
4717 the name in the assembler source. Next, we parse the
4719 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4720 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4722 /* Record the relocation type. */
4726 inst
.reloc
.type
= entry
->ldr_code
;
4730 inst
.reloc
.type
= entry
->ldrs_code
;
4734 inst
.reloc
.type
= entry
->ldc_code
;
4741 if (inst
.reloc
.type
== 0)
4743 inst
.error
= _("this group relocation is not allowed on this instruction");
4744 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4748 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4749 return PARSE_OPERAND_FAIL
;
4753 if (skip_past_char (&p
, ']') == FAIL
)
4755 inst
.error
= _("']' expected");
4756 return PARSE_OPERAND_FAIL
;
4759 if (skip_past_char (&p
, '!') == SUCCESS
)
4760 inst
.operands
[i
].writeback
= 1;
4762 else if (skip_past_comma (&p
) == SUCCESS
)
4764 if (skip_past_char (&p
, '{') == SUCCESS
)
4766 /* [Rn], {expr} - unindexed, with option */
4767 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
4768 0, 255, TRUE
) == FAIL
)
4769 return PARSE_OPERAND_FAIL
;
4771 if (skip_past_char (&p
, '}') == FAIL
)
4773 inst
.error
= _("'}' expected at end of 'option' field");
4774 return PARSE_OPERAND_FAIL
;
4776 if (inst
.operands
[i
].preind
)
4778 inst
.error
= _("cannot combine index with option");
4779 return PARSE_OPERAND_FAIL
;
4782 return PARSE_OPERAND_SUCCESS
;
4786 inst
.operands
[i
].postind
= 1;
4787 inst
.operands
[i
].writeback
= 1;
4789 if (inst
.operands
[i
].preind
)
4791 inst
.error
= _("cannot combine pre- and post-indexing");
4792 return PARSE_OPERAND_FAIL
;
4796 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
4798 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4800 /* We might be using the immediate for alignment already. If we
4801 are, OR the register number into the low-order bits. */
4802 if (inst
.operands
[i
].immisalign
)
4803 inst
.operands
[i
].imm
|= reg
;
4805 inst
.operands
[i
].imm
= reg
;
4806 inst
.operands
[i
].immisreg
= 1;
4808 if (skip_past_comma (&p
) == SUCCESS
)
4809 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
4810 return PARSE_OPERAND_FAIL
;
4814 if (inst
.operands
[i
].negative
)
4816 inst
.operands
[i
].negative
= 0;
4819 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4820 return PARSE_OPERAND_FAIL
;
4825 /* If at this point neither .preind nor .postind is set, we have a
4826 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4827 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
4829 inst
.operands
[i
].preind
= 1;
4830 inst
.reloc
.exp
.X_op
= O_constant
;
4831 inst
.reloc
.exp
.X_add_number
= 0;
4834 return PARSE_OPERAND_SUCCESS
;
4838 parse_address (char **str
, int i
)
4840 return parse_address_main (str
, i
, 0, 0) == PARSE_OPERAND_SUCCESS
4844 static parse_operand_result
4845 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
4847 return parse_address_main (str
, i
, 1, type
);
4850 /* Parse an operand for a MOVW or MOVT instruction. */
4852 parse_half (char **str
)
4857 skip_past_char (&p
, '#');
4858 if (strncasecmp (p
, ":lower16:", 9) == 0)
4859 inst
.reloc
.type
= BFD_RELOC_ARM_MOVW
;
4860 else if (strncasecmp (p
, ":upper16:", 9) == 0)
4861 inst
.reloc
.type
= BFD_RELOC_ARM_MOVT
;
4863 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
4866 skip_whitespace (p
);
4869 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4872 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
4874 if (inst
.reloc
.exp
.X_op
!= O_constant
)
4876 inst
.error
= _("constant expression expected");
4879 if (inst
.reloc
.exp
.X_add_number
< 0
4880 || inst
.reloc
.exp
.X_add_number
> 0xffff)
4882 inst
.error
= _("immediate value out of range");
4890 /* Miscellaneous. */
4892 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4893 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4895 parse_psr (char **str
)
4898 unsigned long psr_field
;
4899 const struct asm_psr
*psr
;
4902 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4903 feature for ease of use and backwards compatibility. */
4905 if (strncasecmp (p
, "SPSR", 4) == 0)
4906 psr_field
= SPSR_BIT
;
4907 else if (strncasecmp (p
, "CPSR", 4) == 0)
4914 while (ISALNUM (*p
) || *p
== '_');
4916 psr
= hash_find_n (arm_v7m_psr_hsh
, start
, p
- start
);
4927 /* A suffix follows. */
4933 while (ISALNUM (*p
) || *p
== '_');
4935 psr
= hash_find_n (arm_psr_hsh
, start
, p
- start
);
4939 psr_field
|= psr
->field
;
4944 goto error
; /* Garbage after "[CS]PSR". */
4946 psr_field
|= (PSR_c
| PSR_f
);
4952 inst
.error
= _("flag for {c}psr instruction expected");
4956 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4957 value suitable for splatting into the AIF field of the instruction. */
4960 parse_cps_flags (char **str
)
4969 case '\0': case ',':
4972 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
4973 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
4974 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
4977 inst
.error
= _("unrecognized CPS flag");
4982 if (saw_a_flag
== 0)
4984 inst
.error
= _("missing CPS flags");
4992 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4993 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4996 parse_endian_specifier (char **str
)
5001 if (strncasecmp (s
, "BE", 2))
5003 else if (strncasecmp (s
, "LE", 2))
5007 inst
.error
= _("valid endian specifiers are be or le");
5011 if (ISALNUM (s
[2]) || s
[2] == '_')
5013 inst
.error
= _("valid endian specifiers are be or le");
5018 return little_endian
;
5021 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5022 value suitable for poking into the rotate field of an sxt or sxta
5023 instruction, or FAIL on error. */
5026 parse_ror (char **str
)
5031 if (strncasecmp (s
, "ROR", 3) == 0)
5035 inst
.error
= _("missing rotation field after comma");
5039 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
5044 case 0: *str
= s
; return 0x0;
5045 case 8: *str
= s
; return 0x1;
5046 case 16: *str
= s
; return 0x2;
5047 case 24: *str
= s
; return 0x3;
5050 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
5055 /* Parse a conditional code (from conds[] below). The value returned is in the
5056 range 0 .. 14, or FAIL. */
5058 parse_cond (char **str
)
5061 const struct asm_cond
*c
;
5063 /* Condition codes are always 2 characters, so matching up to
5064 3 characters is sufficient. */
5069 while (ISALPHA (*q
) && n
< 3)
5071 cond
[n
] = TOLOWER(*q
);
5076 c
= hash_find_n (arm_cond_hsh
, cond
, n
);
5079 inst
.error
= _("condition required");
5087 /* Parse an option for a barrier instruction. Returns the encoding for the
5090 parse_barrier (char **str
)
5093 const struct asm_barrier_opt
*o
;
5096 while (ISALPHA (*q
))
5099 o
= hash_find_n (arm_barrier_opt_hsh
, p
, q
- p
);
5107 /* Parse the operands of a table branch instruction. Similar to a memory
5110 parse_tb (char **str
)
5115 if (skip_past_char (&p
, '[') == FAIL
)
5117 inst
.error
= _("'[' expected");
5121 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5123 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5126 inst
.operands
[0].reg
= reg
;
5128 if (skip_past_comma (&p
) == FAIL
)
5130 inst
.error
= _("',' expected");
5134 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5136 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5139 inst
.operands
[0].imm
= reg
;
5141 if (skip_past_comma (&p
) == SUCCESS
)
5143 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
5145 if (inst
.reloc
.exp
.X_add_number
!= 1)
5147 inst
.error
= _("invalid shift");
5150 inst
.operands
[0].shifted
= 1;
5153 if (skip_past_char (&p
, ']') == FAIL
)
5155 inst
.error
= _("']' expected");
5162 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5163 information on the types the operands can take and how they are encoded.
5164 Up to four operands may be read; this function handles setting the
5165 ".present" field for each read operand itself.
5166 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5167 else returns FAIL. */
5170 parse_neon_mov (char **str
, int *which_operand
)
5172 int i
= *which_operand
, val
;
5173 enum arm_reg_type rtype
;
5175 struct neon_type_el optype
;
5177 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
5179 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5180 inst
.operands
[i
].reg
= val
;
5181 inst
.operands
[i
].isscalar
= 1;
5182 inst
.operands
[i
].vectype
= optype
;
5183 inst
.operands
[i
++].present
= 1;

      if (skip_past_comma (&ptr) == FAIL)
        goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
        goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
           != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
        goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
        {
          /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
             Case 13: VMOV <Sd>, <Rm>  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].present = 1;

          if (rtype == REG_TYPE_NQ)
            {
              first_error (_("can't use Neon quad register here"));
              return FAIL;
            }
          else if (rtype != REG_TYPE_VFS)
            {
              i++;

              if (skip_past_comma (&ptr) == FAIL)
                goto wanted_comma;

              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
                goto wanted_arm;

              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i].present = 1;
            }
        }
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
                                           &optype)) != FAIL)
        {
          /* Case 0: VMOV<c><q> <Qd>, <Qm>
             Case 1: VMOV<c><q> <Dd>, <Dm>
             Case 8: VMOV.F32 <Sd>, <Sm>
             Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
          inst.operands[i].isvec = 1;
          inst.operands[i].vectype = optype;
          inst.operands[i].present = 1;

          if (skip_past_comma (&ptr) == SUCCESS)
            {
              /* Case 15.  */
              i++;

              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
                goto wanted_arm;

              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i++].present = 1;

              if (skip_past_comma (&ptr) == FAIL)
                goto wanted_comma;

              if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
                goto wanted_arm;

              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i++].present = 1;
            }
        }
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
             Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
             Case 10: VMOV.F32 <Sd>, #<imm>
             Case 11: VMOV.F64 <Dd>, #<imm>  */
        inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
          /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
             Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
        ;
      else
        {
          first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
          return FAIL;
        }
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
        goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
        {
          /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
          inst.operands[i].reg = val;
          inst.operands[i].isscalar = 1;
          inst.operands[i].present = 1;
          inst.operands[i].vectype = optype;
        }
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
        {
          /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i++].present = 1;

          if (skip_past_comma (&ptr) == FAIL)
            goto wanted_comma;

          if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
              == FAIL)
            {
              first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
              return FAIL;
            }

          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isvec = 1;
          inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
          inst.operands[i].vectype = optype;
          inst.operands[i].present = 1;

          if (rtype == REG_TYPE_VFS)
            {
              /* Case 14.  */
              i++;
              if (skip_past_comma (&ptr) == FAIL)
                goto wanted_comma;
              if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
                                              &optype)) == FAIL)
                {
                  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
                  return FAIL;
                }
              inst.operands[i].reg = val;
              inst.operands[i].isreg = 1;
              inst.operands[i].isvec = 1;
              inst.operands[i].issingle = 1;
              inst.operands[i].vectype = optype;
              inst.operands[i].present = 1;
            }
        }
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
               != FAIL)
        {
          /* Case 13.  */
          inst.operands[i].reg = val;
          inst.operands[i].isreg = 1;
          inst.operands[i].isvec = 1;
          inst.operands[i].issingle = 1;
          inst.operands[i].vectype = optype;
          inst.operands[i++].present = 1;
        }
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,        /* end of line */

  OP_RR,          /* ARM register */
  OP_RRnpc,       /* ARM register, not r15 */
  OP_RRnpcb,      /* ARM register, not r15, in square brackets */
  OP_RRw,         /* ARM register, not r15, optional trailing ! */
  OP_RCP,         /* Coprocessor number */
  OP_RCN,         /* Coprocessor register */
  OP_RF,          /* FPA register */
  OP_RVS,         /* VFP single precision register */
  OP_RVD,         /* VFP double precision register (0..15) */
  OP_RND,         /* Neon double precision register (0..31) */
  OP_RNQ,         /* Neon quad precision register */
  OP_RVSD,        /* VFP single or double precision register */
  OP_RNDQ,        /* Neon double or quad precision register */
  OP_RNSDQ,       /* Neon single, double or quad precision register */
  OP_RNSC,        /* Neon scalar D[X] */
  OP_RVC,         /* VFP control register */
  OP_RMF,         /* Maverick F register */
  OP_RMD,         /* Maverick D register */
  OP_RMFX,        /* Maverick FX register */
  OP_RMDX,        /* Maverick DX register */
  OP_RMAX,        /* Maverick AX register */
  OP_RMDS,        /* Maverick DSPSC register */
  OP_RIWR,        /* iWMMXt wR register */
  OP_RIWC,        /* iWMMXt wC register */
  OP_RIWG,        /* iWMMXt wCG register */
  OP_RXA,         /* XScale accumulator register */

  OP_REGLST,      /* ARM register list */
  OP_VRSLST,      /* VFP single-precision register list */
  OP_VRDLST,      /* VFP double-precision register list */
  OP_VRSDLST,     /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,      /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,     /* Neon element/structure list */

  OP_NILO,        /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,     /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,     /* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,     /* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC,  /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,   /* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,    /* Neon D reg, or Neon scalar.  */
  OP_VMOV,        /* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,  /* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,   /* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,   /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,          /* immediate zero */
  OP_I7,          /* immediate value 0 .. 7 */
  OP_I15,         /* 0 .. 15 */
  OP_I16,         /* 1 .. 16 */
  OP_I16z,        /* 0 .. 16 */
  OP_I31,         /* 0 .. 31 */
  OP_I31w,        /* 0 .. 31, optional trailing ! */
  OP_I32,         /* 1 .. 32 */
  OP_I32z,        /* 0 .. 32 */
  OP_I63,         /* 0 .. 63 */
  OP_I63s,        /* -64 .. 63 */
  OP_I64,         /* 1 .. 64 */
  OP_I64z,        /* 0 .. 64 */
  OP_I255,        /* 0 .. 255 */

  OP_I4b,         /* immediate, prefix optional, 1 .. 4 */
  OP_I7b,         /* 0 .. 7 */
  OP_I15b,        /* 0 .. 15 */
  OP_I31b,        /* 0 .. 31 */

  OP_SH,          /* shifter operand */
  OP_SHG,         /* shifter operand with possible group relocation */
  OP_ADDR,        /* Memory address expression (any mode) */
  OP_ADDRGLDR,    /* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,   /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,    /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,         /* arbitrary expression */
  OP_EXPi,        /* same, with optional immediate prefix */
  OP_EXPr,        /* same, with optional relocation suffix */
  OP_HALF,        /* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,        /* CPS flags */
  OP_ENDI,        /* Endianness specifier */
  OP_PSR,         /* CPSR/SPSR mask for msr */
  OP_COND,        /* conditional code */
  OP_TB,          /* Table branch.  */

  OP_RVC_PSR,     /* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,     /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,    /* ARM register or literal 0 */
  OP_RR_EXr,      /* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,      /* ARM register or expression with imm prefix */
  OP_RF_IF,       /* FPA register or immediate */
  OP_RIWR_RIWC,   /* iWMMXt R or C reg */
  OP_RIWC_RIWG,   /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,        /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,       /* 0 .. 31 */
  OP_oI32b,       /* 1 .. 32 */
  OP_oIffffb,     /* 0 .. 65535 */
  OP_oI255c,      /* curly-brace enclosed, 0 .. 255 */

  OP_oRR,         /* ARM register */
  OP_oRRnpc,      /* ARM register, not the PC */
  OP_oRRw,        /* ARM register, not r15, optional trailing ! */
  OP_oRND,        /* Optional Neon double precision register */
  OP_oRNQ,        /* Optional Neon quad precision register */
  OP_oRNDQ,       /* Optional Neon double or quad precision register */
  OP_oRNSDQ,      /* Optional single, double or quad precision vector register */
  OP_oSHll,       /* LSL immediate */
  OP_oSHar,       /* ASR immediate */
  OP_oSHllar,     /* LSL or ASR immediate */
  OP_oROR,        /* ROR 0/8/16/24 */
  OP_oBARRIER,    /* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
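
/* Usage note (editorial, illustrative only): an operand pattern is a
   sequence of the codes above terminated by OP_stop.  parse_operands
   (below) walks the pattern left to right; codes numerically at or above
   OP_FIRST_OPTIONAL mark optional operands, which the backtracking logic
   in parse_operands may drop and retry without if a later required
   operand fails to match.  */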
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

#define po_char_or_fail(chr) do {		\
  if (skip_past_char (&str, chr) == FAIL)	\
    goto failure;				\
} while (0)

#define po_reg_or_fail(regtype) do {				\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    {								\
      first_error (_(reg_expected_msgs[regtype]));		\
      goto failure;						\
    }								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

#define po_reg_or_goto(regtype, label) do {			\
  val = arm_typed_reg_parse (&str, regtype, &rtype,		\
			     &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
								\
  inst.operands[i].reg = val;					\
  inst.operands[i].isreg = 1;					\
  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);		\
  inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			    || rtype == REG_TYPE_VFD		\
			    || rtype == REG_TYPE_NQ);		\
} while (0)

#define po_imm_or_fail(min, max, popt) do {			\
  if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
    goto failure;						\
  inst.operands[i].imm = val;					\
} while (0)

#define po_scalar_or_goto(elsz, label) do {			\
  val = parse_scalar (&str, elsz, &inst.operands[i].vectype);	\
  if (val == FAIL)						\
    goto label;							\
  inst.operands[i].reg = val;					\
  inst.operands[i].isscalar = 1;				\
} while (0)

#define po_misc_or_fail(expr) do {		\
  if (expr)					\
    goto failure;				\
} while (0)

#define po_misc_or_fail_no_backtrack(expr) do {		\
  result = expr;					\
  if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
    backtrack_pos = 0;					\
  if (result != PARSE_OPERAND_SUCCESS)			\
    goto failure;					\
} while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
5605 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
5606 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
5607 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
5608 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
5609 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
5610 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
5612 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
5614 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
5616 /* Also accept generic coprocessor regs for unknown registers. */
5618 po_reg_or_fail (REG_TYPE_CN
);
5620 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
5621 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
5622 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
5623 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
5624 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
5625 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
5626 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
5627 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
5628 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
5629 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
5631 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
5633 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
5634 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
5636 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
5638 /* Neon scalar. Using an element size of 8 means that some invalid
5639 scalars are accepted here, so deal with those in later code. */
5640 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
5642 /* WARNING: We can expand to two operands here. This has the potential
5643 to totally confuse the backtracking mechanism! It will be OK at
5644 least as long as we don't try to use optional args as well,
5648 po_reg_or_goto (REG_TYPE_NDQ
, try_imm
);
5649 inst
.operands
[i
].present
= 1;
5651 skip_past_comma (&str
);
5652 po_reg_or_goto (REG_TYPE_NDQ
, one_reg_only
);
5655 /* Optional register operand was omitted. Unfortunately, it's in
5656 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5657 here (this is a bit grotty). */
5658 inst
.operands
[i
] = inst
.operands
[i
-1];
5659 inst
.operands
[i
-1].present
= 0;
5662 /* There's a possibility of getting a 64-bit immediate here, so
5663 we need special handling. */
5664 if (parse_big_immediate (&str
, i
) == FAIL
)
5666 inst
.error
= _("immediate value is out of range");
5674 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
5677 po_imm_or_fail (0, 0, TRUE
);
5682 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
5687 po_scalar_or_goto (8, try_rr
);
5690 po_reg_or_fail (REG_TYPE_RN
);
5696 po_scalar_or_goto (8, try_nsdq
);
5699 po_reg_or_fail (REG_TYPE_NSDQ
);
5705 po_scalar_or_goto (8, try_ndq
);
5708 po_reg_or_fail (REG_TYPE_NDQ
);
5714 po_scalar_or_goto (8, try_vfd
);
5717 po_reg_or_fail (REG_TYPE_VFD
);
5722 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5723 not careful then bad things might happen. */
5724 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
5729 po_reg_or_goto (REG_TYPE_NDQ
, try_mvnimm
);
5732 /* There's a possibility of getting a 64-bit immediate here, so
5733 we need special handling. */
5734 if (parse_big_immediate (&str
, i
) == FAIL
)
5736 inst
.error
= _("immediate value is out of range");
5744 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
5747 po_imm_or_fail (0, 63, TRUE
);
5752 po_char_or_fail ('[');
5753 po_reg_or_fail (REG_TYPE_RN
);
5754 po_char_or_fail (']');
5759 po_reg_or_fail (REG_TYPE_RN
);
5760 if (skip_past_char (&str
, '!') == SUCCESS
)
5761 inst
.operands
[i
].writeback
= 1;
5765 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
5766 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
5767 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
5768 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
5769 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
5770 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
5771 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
5772 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
5773 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
5774 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
5775 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
5776 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
5778 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
5780 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
5781 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
5783 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
5784 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
5785 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
5787 /* Immediate variants */
5789 po_char_or_fail ('{');
5790 po_imm_or_fail (0, 255, TRUE
);
5791 po_char_or_fail ('}');
5795 /* The expression parser chokes on a trailing !, so we have
5796 to find it first and zap it. */
5799 while (*s
&& *s
!= ',')
5804 inst
.operands
[i
].writeback
= 1;
5806 po_imm_or_fail (0, 31, TRUE
);
5814 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5819 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5824 po_misc_or_fail (my_get_expression (&inst
.reloc
.exp
, &str
,
5826 if (inst
.reloc
.exp
.X_op
== O_symbol
)
5828 val
= parse_reloc (&str
);
5831 inst
.error
= _("unrecognized relocation suffix");
5834 else if (val
!= BFD_RELOC_UNUSED
)
5836 inst
.operands
[i
].imm
= val
;
5837 inst
.operands
[i
].hasreloc
= 1;
5842 /* Operand for MOVW or MOVT. */
5844 po_misc_or_fail (parse_half (&str
));
5847 /* Register or expression */
5848 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
5849 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
5851 /* Register or immediate */
5852 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
5853 I0
: po_imm_or_fail (0, 0, FALSE
); break;
5855 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
5857 if (!is_immediate_prefix (*str
))
5860 val
= parse_fpa_immediate (&str
);
5863 /* FPA immediates are encoded as registers 8-15.
5864 parse_fpa_immediate has already applied the offset. */
5865 inst
.operands
[i
].reg
= val
;
5866 inst
.operands
[i
].isreg
= 1;
5869 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
5870 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
5872 /* Two kinds of register */
5875 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5877 || (rege
->type
!= REG_TYPE_MMXWR
5878 && rege
->type
!= REG_TYPE_MMXWC
5879 && rege
->type
!= REG_TYPE_MMXWCG
))
5881 inst
.error
= _("iWMMXt data or control register expected");
5884 inst
.operands
[i
].reg
= rege
->number
;
5885 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
5891 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
5893 || (rege
->type
!= REG_TYPE_MMXWC
5894 && rege
->type
!= REG_TYPE_MMXWCG
))
5896 inst
.error
= _("iWMMXt control register expected");
5899 inst
.operands
[i
].reg
= rege
->number
;
5900 inst
.operands
[i
].isreg
= 1;
5905 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
5906 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
5907 case OP_oROR
: val
= parse_ror (&str
); break;
5908 case OP_PSR
: val
= parse_psr (&str
); break;
5909 case OP_COND
: val
= parse_cond (&str
); break;
5910 case OP_oBARRIER
:val
= parse_barrier (&str
); break;
5913 po_reg_or_goto (REG_TYPE_VFC
, try_psr
);
5914 inst
.operands
[i
].isvec
= 1; /* Mark VFP control reg as vector. */
5917 val
= parse_psr (&str
);
5921 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
5924 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5926 if (strncasecmp (str
, "APSR_", 5) == 0)
5933 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
5934 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
5935 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
5936 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
5937 default: found
= 16;
5941 inst
.operands
[i
].isvec
= 1;
5948 po_misc_or_fail (parse_tb (&str
));
5951 /* Register lists */
5953 val
= parse_reg_list (&str
);
5956 inst
.operands
[1].writeback
= 1;
5962 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
);
5966 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
);
5970 /* Allow Q registers too. */
5971 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5976 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5978 inst
.operands
[i
].issingle
= 1;
5983 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
5988 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
5989 &inst
.operands
[i
].vectype
);
5992 /* Addressing modes */
5994 po_misc_or_fail (parse_address (&str
, i
));
5998 po_misc_or_fail_no_backtrack (
5999 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
6003 po_misc_or_fail_no_backtrack (
6004 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
6008 po_misc_or_fail_no_backtrack (
6009 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
6013 po_misc_or_fail (parse_shifter_operand (&str
, i
));
6017 po_misc_or_fail_no_backtrack (
6018 parse_shifter_operand_group_reloc (&str
, i
));
6022 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
6026 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
6030 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
6034 as_fatal (_("unhandled operand code %d"), upat
[i
]);
6037 /* Various value-based sanity checks and shared operations. We
6038 do not signal immediate failures for the register constraints;
6039 this allows a syntax error to take precedence. */
6048 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
6049 inst
.error
= BAD_PC
;
6067 inst
.operands
[i
].imm
= val
;
6074 /* If we get here, this operand was successfully parsed. */
6075 inst
.operands
[i
].present
= 1;
6079 inst
.error
= BAD_ARGS
;
6084 /* The parse routine should already have set inst.error, but set a
6085 default here just in case. */
6087 inst
.error
= _("syntax error");
6091 /* Do not backtrack over a trailing optional argument that
6092 absorbed some text. We will only fail again, with the
6093 'garbage following instruction' error message, which is
6094 probably less helpful than the current one. */
6095 if (backtrack_index
== i
&& backtrack_pos
!= str
6096 && upat
[i
+1] == OP_stop
)
6099 inst
.error
= _("syntax error");
6103 /* Try again, skipping the optional argument at backtrack_pos. */
6104 str
= backtrack_pos
;
6105 inst
.error
= backtrack_error
;
6106 inst
.operands
[backtrack_index
].present
= 0;
6107 i
= backtrack_index
;
6111 /* Check that we have parsed all the arguments. */
6112 if (*str
!= '\0' && !inst
.error
)
6113 inst
.error
= _("garbage following instruction");
6115 return inst
.error
? FAIL
: SUCCESS
;
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail

/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) do {		\
  if (expr)					\
    {						\
      inst.error = err;				\
      return;					\
    }						\
} while (0)

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
#define reject_bad_reg(reg) do {			\
  if (reg == REG_SP || reg == REG_PC)			\
    {							\
      inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
      return;						\
    }							\
} while (0)

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  */
#define warn_deprecated_sp(reg) do {		\
  if (warn_on_deprecated && reg == REG_SP)	\
    as_warn (_("use of r13 is deprecated"));	\
} while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))
/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}

/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  if (val <= 0xff)
    return val;

  for (i = 1; i <= 24; i++)
    {
      a = val >> i;
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
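
/* Worked example (editorial, not part of the original source):
   0x0003FC00 is 0xFF rotated right by 22 bits, so encode_arm_immediate
   finds rotate_left (0x0003FC00, 22) == 0xFF and returns
   0xFF | (22 << 7) == 0xBFF, i.e. rotation field 0xB and constant 0xFF.
   A value such as 0x12345678 has no rotated 8-bit form and yields FAIL.
   Note also that the i == 0 iteration evaluates v >> 32 inside
   rotate_left, which is formally undefined in C, though the result is
   not used unless the value already fits in 8 bits.  */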
/* Encode a VFP SP or DP register number into inst.instruction.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
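
/* Example of the split encodings above (editorial, illustrative only):
   a double register such as d17 in the Dd position sets bits [15:12] to
   17 & 15 == 1 and the extension bit at bit 22 to 17 >> 4 == 1, while a
   single register such as s3 in the Sd position sets bits [15:12] to
   3 >> 1 == 1 and bit 22 to 3 & 1 == 1.  */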
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */

static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}
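
/* Illustrative example (editorial): for "add r0, r1, r2, asr r3" the Rm
   field receives r2 via encode_arm_shifter_operand, and encode_arm_shift
   then places the shift type in bits [6:5], sets SHIFT_BY_REG, and puts
   r3 in the shift-register field via imm << 8.  For an immediate shift
   such as "asr #3" only the shift type is placed here; the shift amount
   is left to the BFD_RELOC_ARM_SHIFT_IMM fixup in md_apply_fix, as the
   comment above encode_arm_shift notes.  */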
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */

static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;
    }
  else if (inst.operands[i].postind)
    {
      assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */

static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */

static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
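
/* Summary (editorial, illustrative only): addressing mode 2 (above)
   carries a 12-bit immediate offset, fixed up via
   BFD_RELOC_ARM_OFFSET_IMM, and permits a scaled register index;
   addressing mode 3 rejects scaled indices and uses the smaller split
   8-bit offset selected by HWOFFSET_IMM and fixed up via
   BFD_RELOC_ARM_OFFSET_IMM8.  */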
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static void
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }
}
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return 1; if it can't, convert inst.instruction to a literal-pool
   load and return 0.  If this is not a valid thing to do in the
   current context, set inst.error and return 1.

   inst.operands[i] describes the destination register.  */

static int
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return 1;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return 1;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return 1;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return 1;
	    }
	}
    }

  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return 1;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return 0;
}
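
/* Usage example (editorial, illustrative only): for "ldr r0, =0x3FC00"
   the constant is encodable, so the instruction is rewritten as
   "mov r0, #0x3FC00" and no pool entry is made; for
   "ldr r0, =0x12345678" neither the value nor its complement is
   encodable, so the constant is added to the literal pool and the
   instruction becomes a pc-relative load patched via
   BFD_RELOC_ARM_LITERAL.  */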
6533 /* Functions for instruction encoding, sorted by sub-architecture.
6534 First some generics; their names are taken from the conventional
6535 bit positions for register arguments in ARM format instructions. */
6545 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6551 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6552 inst
.instruction
|= inst
.operands
[1].reg
;
6558 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6559 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6565 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6566 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6572 unsigned Rn
= inst
.operands
[2].reg
;
6573 /* Enforce restrictions on SWP instruction. */
6574 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
6575 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
6576 _("Rn must not overlap other operands"));
6577 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6578 inst
.instruction
|= inst
.operands
[1].reg
;
6579 inst
.instruction
|= Rn
<< 16;
6585 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6586 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6587 inst
.instruction
|= inst
.operands
[2].reg
;
6593 inst
.instruction
|= inst
.operands
[0].reg
;
6594 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
6595 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
6601 inst
.instruction
|= inst
.operands
[0].imm
;
6607 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6608 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
6611 /* ARM instructions, in alphabetical order by function name (except
6612 that wrapper functions appear immediately after the function they
6615 /* This is a pseudo-op of the form "adr rd, label" to be converted
6616 into a relative address of the form "add rd, pc, #label-.-8". */
6621 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
6623 /* Frag hacking will turn this into a sub instruction if the offset turns
6624 out to be negative. */
6625 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
6626 inst
.reloc
.pc_rel
= 1;
6627 inst
.reloc
.exp
.X_add_number
-= 8;
6630 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6631 into a relative address of the form:
6632 add rd, pc, #low(label-.-8)"
6633 add rd, rd, #high(label-.-8)" */
6638 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
6640 /* Frag hacking will turn this into a sub instruction if the offset turns
6641 out to be negative. */
6642 inst
.reloc
.type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
6643 inst
.reloc
.pc_rel
= 1;
6644 inst
.size
= INSN_SIZE
* 2;
6645 inst
.reloc
.exp
.X_add_number
-= 8;
6651 if (!inst
.operands
[1].present
)
6652 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
6653 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6654 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
6655 encode_arm_shifter_operand (2);
6661 if (inst
.operands
[0].present
)
6663 constraint ((inst
.instruction
& 0xf0) != 0x40
6664 && inst
.operands
[0].imm
!= 0xf,
6665 _("bad barrier type"));
6666 inst
.instruction
|= inst
.operands
[0].imm
;
6669 inst
.instruction
|= 0xf;
6675 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
6676 constraint (msb
> 32, _("bit-field extends past end of register"));
6677 /* The instruction encoding stores the LSB and MSB,
6678 not the LSB and width. */
6679 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6680 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
6681 inst
.instruction
|= (msb
- 1) << 16;
6689 /* #0 in second position is alternative syntax for bfc, which is
6690 the same instruction but with REG_PC in the Rm field. */
6691 if (!inst
.operands
[1].isreg
)
6692 inst
.operands
[1].reg
= REG_PC
;
6694 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
6695 constraint (msb
> 32, _("bit-field extends past end of register"));
6696 /* The instruction encoding stores the LSB and MSB,
6697 not the LSB and width. */
6698 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6699 inst
.instruction
|= inst
.operands
[1].reg
;
6700 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6701 inst
.instruction
|= (msb
- 1) << 16;
6707 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
6708 _("bit-field extends past end of register"));
6709 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
6710 inst
.instruction
|= inst
.operands
[1].reg
;
6711 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
6712 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
6715 /* ARM V5 breakpoint instruction (argument parse)
6716 BKPT <16 bit unsigned immediate>
6717 Instruction is not conditional.
6718 The bit pattern given in insns[] has the COND_ALWAYS condition,
6719 and it is an error if the caller tried to override that. */
6724 /* Top 12 of 16 bits to bits 19:8. */
6725 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
6727 /* Bottom 4 of 16 bits to bits 3:0. */
6728 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
6732 encode_branch (int default_reloc
)
6734 if (inst
.operands
[0].hasreloc
)
6736 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
,
6737 _("the only suffix valid here is '(plt)'"));
6738 inst
.reloc
.type
= BFD_RELOC_ARM_PLT32
;
6742 inst
.reloc
.type
= default_reloc
;
6744 inst
.reloc
.pc_rel
= 1;
6751 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6752 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6755 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6762 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6764 if (inst
.cond
== COND_ALWAYS
)
6765 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6767 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
6771 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
6774 /* ARM V5 branch-link-exchange instruction (argument parse)
6775 BLX <target_addr> ie BLX(1)
6776 BLX{<condition>} <Rm> ie BLX(2)
6777 Unfortunately, there are two different opcodes for this mnemonic.
6778 So, the insns[].value is not used, and the code here zaps values
6779 into inst.instruction.
6780 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6785 if (inst
.operands
[0].isreg
)
6787 /* Arg is a register; the opcode provided by insns[] is correct.
6788 It is not illegal to do "blx pc", just useless. */
6789 if (inst
.operands
[0].reg
== REG_PC
)
6790 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6792 inst
.instruction
|= inst
.operands
[0].reg
;
6796 /* Arg is an address; this instruction cannot be executed
6797 conditionally, and the opcode must be adjusted. */
6798 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
6799 inst
.instruction
= 0xfa000000;
6801 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
6802 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
6805 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
6812 bfd_boolean want_reloc
;
6814 if (inst
.operands
[0].reg
== REG_PC
)
6815 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6817 inst
.instruction
|= inst
.operands
[0].reg
;
6818 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
6819 it is for ARMv4t or earlier. */
6820 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
6821 if (object_arch
&& !ARM_CPU_HAS_FEATURE (*object_arch
, arm_ext_v5
))
6825 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
6830 inst
.reloc
.type
= BFD_RELOC_ARM_V4BX
;
6834 /* ARM v5TEJ. Jump to Jazelle code. */
6839 if (inst
.operands
[0].reg
== REG_PC
)
6840 as_tsktsk (_("use of r15 in bxj is not really useful"));
6842 inst
.instruction
|= inst
.operands
[0].reg
;
6845 /* Co-processor data operation:
6846 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6847 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6851 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6852 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
6853 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
6854 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6855 inst
.instruction
|= inst
.operands
[4].reg
;
6856 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6862 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
6863 encode_arm_shifter_operand (1);
6866 /* Transfer between coprocessor and ARM registers.
6867 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6872 No special properties. */
6879 Rd
= inst
.operands
[2].reg
;
6882 if (inst
.instruction
== 0xee000010
6883 || inst
.instruction
== 0xfe000010)
6885 reject_bad_reg (Rd
);
6888 constraint (Rd
== REG_SP
, BAD_SP
);
6893 if (inst
.instruction
== 0xe000010)
6894 constraint (Rd
== REG_PC
, BAD_PC
);
6898 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6899 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
6900 inst
.instruction
|= Rd
<< 12;
6901 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
6902 inst
.instruction
|= inst
.operands
[4].reg
;
6903 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
6906 /* Transfer between coprocessor register and pair of ARM registers.
6907 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6912 Two XScale instructions are special cases of these:
6914 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6915 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6917 Result unpredictable if Rd or Rn is R15. */
6924 Rd
= inst
.operands
[2].reg
;
6925 Rn
= inst
.operands
[3].reg
;
6929 reject_bad_reg (Rd
);
6930 reject_bad_reg (Rn
);
6934 constraint (Rd
== REG_PC
, BAD_PC
);
6935 constraint (Rn
== REG_PC
, BAD_PC
);
6938 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
6939 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
6940 inst
.instruction
|= Rd
<< 12;
6941 inst
.instruction
|= Rn
<< 16;
6942 inst
.instruction
|= inst
.operands
[4].reg
;
6948 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
6949 if (inst
.operands
[1].present
)
6951 inst
.instruction
|= CPSI_MMOD
;
6952 inst
.instruction
|= inst
.operands
[1].imm
;
6959 inst
.instruction
|= inst
.operands
[0].imm
;
6965 /* There is no IT instruction in ARM mode. We
6966 process it but do not generate code for it. */
6973 int base_reg
= inst
.operands
[0].reg
;
6974 int range
= inst
.operands
[1].imm
;
6976 inst
.instruction
|= base_reg
<< 16;
6977 inst
.instruction
|= range
;
6979 if (inst
.operands
[1].writeback
)
6980 inst
.instruction
|= LDM_TYPE_2_OR_3
;
6982 if (inst
.operands
[0].writeback
)
6984 inst
.instruction
|= WRITE_BACK
;
6985 /* Check for unpredictable uses of writeback. */
6986 if (inst
.instruction
& LOAD_BIT
)
6988 /* Not allowed in LDM type 2. */
6989 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
6990 && ((range
& (1 << REG_PC
)) == 0))
6991 as_warn (_("writeback of base register is UNPREDICTABLE"));
6992 /* Only allowed if base reg not in list for other types. */
6993 else if (range
& (1 << base_reg
))
6994 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6998 /* Not allowed for type 2. */
6999 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
7000 as_warn (_("writeback of base register is UNPREDICTABLE"));
7001 /* Only allowed if base reg not in list, or first in list. */
7002 else if ((range
& (1 << base_reg
))
7003 && (range
& ((1 << base_reg
) - 1)))
7004 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7009 /* ARMv5TE load-consecutive (argument parse)
7018 constraint (inst
.operands
[0].reg
% 2 != 0,
7019 _("first destination register must be even"));
7020 constraint (inst
.operands
[1].present
7021 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
7022 _("can only load two consecutive registers"));
7023 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
7024 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
7026 if (!inst
.operands
[1].present
)
7027 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
7029 if (inst
.instruction
& LOAD_BIT
)
7031 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7032 register and the first register written; we have to diagnose
7033 overlap between the base and the second register written here. */
7035 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
7036 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
7037 as_warn (_("base register written back, and overlaps "
7038 "second destination register"));
7040 /* For an index-register load, the index register must not overlap the
7041 destination (even if not write-back). */
7042 else if (inst
.operands
[2].immisreg
7043 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
7044 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
7045 as_warn (_("index register overlaps destination register"));
7048 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7049 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
7055 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
7056 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
7057 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
7058 || inst
.operands
[1].negative
7059 /* This can arise if the programmer has written
7061 or if they have mistakenly used a register name as the last
7064 It is very difficult to distinguish between these two cases
7065 because "rX" might actually be a label. ie the register
7066 name has been occluded by a symbol of the same name. So we
7067 just generate a general 'bad addressing mode' type error
7068 message and leave it up to the programmer to discover the
7069 true cause and fix their mistake. */
7070 || (inst
.operands
[1].reg
== REG_PC
),
7073 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7074 || inst
.reloc
.exp
.X_add_number
!= 0,
7075 _("offset must be zero in ARM encoding"));
7077 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7078 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7079 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7085 constraint (inst
.operands
[0].reg
% 2 != 0,
7086 _("even register required"));
7087 constraint (inst
.operands
[1].present
7088 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
7089 _("can only load two consecutive registers"));
7090 /* If op 1 were present and equal to PC, this function wouldn't
7091 have been called in the first place. */
7092 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
7094 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7095 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7101 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7102 if (!inst
.operands
[1].isreg
)
7103 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/FALSE
))
7105 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
7111 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7113 if (inst
.operands
[1].preind
)
7115 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7116 || inst
.reloc
.exp
.X_add_number
!= 0,
7117 _("this instruction requires a post-indexed address"));
7119 inst
.operands
[1].preind
= 0;
7120 inst
.operands
[1].postind
= 1;
7121 inst
.operands
[1].writeback
= 1;
7123 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7124 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
7127 /* Halfword and signed-byte load/store operations. */
7132 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7133 if (!inst
.operands
[1].isreg
)
7134 if (move_or_literal_pool (0, /*thumb_p=*/FALSE
, /*mode_3=*/TRUE
))
7136 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
7142 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7144 if (inst
.operands
[1].preind
)
7146 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7147 || inst
.reloc
.exp
.X_add_number
!= 0,
7148 _("this instruction requires a post-indexed address"));
7150 inst
.operands
[1].preind
= 0;
7151 inst
.operands
[1].postind
= 1;
7152 inst
.operands
[1].writeback
= 1;
7154 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7155 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
7158 /* Co-processor register load/store.
7159 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7163 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
7164 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
7165 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
7171 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7172 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7173 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
7174 && !(inst
.instruction
& 0x00400000))
7175 as_tsktsk (_("Rd and Rm should be different in mla"));
7177 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7178 inst
.instruction
|= inst
.operands
[1].reg
;
7179 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7180 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7186 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7187 encode_arm_shifter_operand (1);
7190 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7197 top
= (inst
.instruction
& 0x00400000) != 0;
7198 constraint (top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVW
,
7199 _(":lower16: not allowed this instruction"));
7200 constraint (!top
&& inst
.reloc
.type
== BFD_RELOC_ARM_MOVT
,
7201 _(":upper16: not allowed instruction"));
7202 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7203 if (inst
.reloc
.type
== BFD_RELOC_UNUSED
)
7205 imm
= inst
.reloc
.exp
.X_add_number
;
7206 /* The value is in two pieces: 0:11, 16:19. */
7207 inst
.instruction
|= (imm
& 0x00000fff);
7208 inst
.instruction
|= (imm
& 0x0000f000) << 4;
7212 static void do_vfp_nsyn_opcode (const char *);
7215 do_vfp_nsyn_mrs (void)
7217 if (inst
.operands
[0].isvec
)
7219 if (inst
.operands
[1].reg
!= 1)
7220 first_error (_("operand 1 must be FPSCR"));
7221 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7222 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7223 do_vfp_nsyn_opcode ("fmstat");
7225 else if (inst
.operands
[1].isvec
)
7226 do_vfp_nsyn_opcode ("fmrx");
7234 do_vfp_nsyn_msr (void)
7236 if (inst
.operands
[0].isvec
)
7237 do_vfp_nsyn_opcode ("fmxr");
7247 if (do_vfp_nsyn_mrs () == SUCCESS
)
7250 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7251 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7253 _("'CPSR' or 'SPSR' expected"));
7254 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7255 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7258 /* Two possible forms:
7259 "{C|S}PSR_<field>, Rm",
7260 "{C|S}PSR_f, #expression". */
7265 if (do_vfp_nsyn_msr () == SUCCESS
)
7268 inst
.instruction
|= inst
.operands
[0].imm
;
7269 if (inst
.operands
[1].isreg
)
7270 inst
.instruction
|= inst
.operands
[1].reg
;
7273 inst
.instruction
|= INST_IMMEDIATE
;
7274 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7275 inst
.reloc
.pc_rel
= 0;
7282 if (!inst
.operands
[2].present
)
7283 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7284 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7285 inst
.instruction
|= inst
.operands
[1].reg
;
7286 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7288 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7289 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7290 as_tsktsk (_("Rd and Rm should be different in mul"));
7293 /* Long Multiply Parser
7294 UMULL RdLo, RdHi, Rm, Rs
7295 SMULL RdLo, RdHi, Rm, Rs
7296 UMLAL RdLo, RdHi, Rm, Rs
7297 SMLAL RdLo, RdHi, Rm, Rs. */
7302 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7303 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7304 inst
.instruction
|= inst
.operands
[2].reg
;
7305 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7307 /* rdhi and rdlo must be different. */
7308 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7309 as_tsktsk (_("rdhi and rdlo must be different"));
7311 /* rdhi, rdlo and rm must all be different before armv6. */
7312 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
7313 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
7314 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7315 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7321 if (inst
.operands
[0].present
7322 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
7324 /* Architectural NOP hints are CPSR sets with no bits selected. */
7325 inst
.instruction
&= 0xf0000000;
7326 inst
.instruction
|= 0x0320f000;
7327 if (inst
.operands
[0].present
)
7328 inst
.instruction
|= inst
.operands
[0].imm
;
7332 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7333 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7334 Condition defaults to COND_ALWAYS.
7335 Error if Rd, Rn or Rm are R15. */
7340 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7341 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7342 inst
.instruction
|= inst
.operands
[2].reg
;
7343 if (inst
.operands
[3].present
)
7344 encode_arm_shift (3);
7347 /* ARM V6 PKHTB (Argument Parse). */
7352 if (!inst
.operands
[3].present
)
7354 /* If the shift specifier is omitted, turn the instruction
7355 into pkhbt rd, rm, rn. */
7356 inst
.instruction
&= 0xfff00010;
7357 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7358 inst
.instruction
|= inst
.operands
[1].reg
;
7359 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7363 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7364 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7365 inst
.instruction
|= inst
.operands
[2].reg
;
7366 encode_arm_shift (3);
7370 /* ARMv5TE: Preload-Cache
7374 Syntactically, like LDR with B=1, W=0, L=1. */
7379 constraint (!inst
.operands
[0].isreg
,
7380 _("'[' expected after PLD mnemonic"));
7381 constraint (inst
.operands
[0].postind
,
7382 _("post-indexed expression used in preload instruction"));
7383 constraint (inst
.operands
[0].writeback
,
7384 _("writeback used in preload instruction"));
7385 constraint (!inst
.operands
[0].preind
,
7386 _("unindexed addressing used in preload instruction"));
7387 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7390 /* ARMv7: PLI <addr_mode> */
7394 constraint (!inst
.operands
[0].isreg
,
7395 _("'[' expected after PLI mnemonic"));
7396 constraint (inst
.operands
[0].postind
,
7397 _("post-indexed expression used in preload instruction"));
7398 constraint (inst
.operands
[0].writeback
,
7399 _("writeback used in preload instruction"));
7400 constraint (!inst
.operands
[0].preind
,
7401 _("unindexed addressing used in preload instruction"));
7402 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7403 inst
.instruction
&= ~PRE_INDEX
;
7409 inst
.operands
[1] = inst
.operands
[0];
7410 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
7411 inst
.operands
[0].isreg
= 1;
7412 inst
.operands
[0].writeback
= 1;
7413 inst
.operands
[0].reg
= REG_SP
;
7417 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7418 word at the specified address and the following word
7420 Unconditionally executed.
7421 Error if Rn is R15. */
7426 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7427 if (inst
.operands
[0].writeback
)
7428 inst
.instruction
|= WRITE_BACK
;
7431 /* ARM V6 ssat (argument parse). */
7436 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7437 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
7438 inst
.instruction
|= inst
.operands
[2].reg
;
7440 if (inst
.operands
[3].present
)
7441 encode_arm_shift (3);
7444 /* ARM V6 usat (argument parse). */
7449 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7450 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7451 inst
.instruction
|= inst
.operands
[2].reg
;
7453 if (inst
.operands
[3].present
)
7454 encode_arm_shift (3);
7457 /* ARM V6 ssat16 (argument parse). */
7462 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7463 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
7464 inst
.instruction
|= inst
.operands
[2].reg
;
7470 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7471 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7472 inst
.instruction
|= inst
.operands
[2].reg
;
7475 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7476 preserving the other bits.
7478 setend <endian_specifier>, where <endian_specifier> is either
7484 if (inst
.operands
[0].imm
)
7485 inst
.instruction
|= 0x200;
7491 unsigned int Rm
= (inst
.operands
[1].present
7492 ? inst
.operands
[1].reg
7493 : inst
.operands
[0].reg
);
7495 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7496 inst
.instruction
|= Rm
;
7497 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
7499 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7500 inst
.instruction
|= SHIFT_BY_REG
;
7503 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7509 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
7510 inst
.reloc
.pc_rel
= 0;
7516 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
7517 inst
.reloc
.pc_rel
= 0;
7520 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7521 SMLAxy{cond} Rd,Rm,Rs,Rn
7522 SMLAWy{cond} Rd,Rm,Rs,Rn
7523 Error if any register is R15. */
7528 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7529 inst
.instruction
|= inst
.operands
[1].reg
;
7530 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7531 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7534 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7535 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7536 Error if any register is R15.
7537 Warning if Rdlo == Rdhi. */
7542 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7543 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7544 inst
.instruction
|= inst
.operands
[2].reg
;
7545 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7547 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7548 as_tsktsk (_("rdhi and rdlo must be different"));
7551 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7552 SMULxy{cond} Rd,Rm,Rs
7553 Error if any register is R15. */
7558 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7559 inst
.instruction
|= inst
.operands
[1].reg
;
7560 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7563 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7564 the same for both ARM and Thumb-2. */
7571 if (inst
.operands
[0].present
)
7573 reg
= inst
.operands
[0].reg
;
7574 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
7579 inst
.instruction
|= reg
<< 16;
7580 inst
.instruction
|= inst
.operands
[1].imm
;
7581 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
7582 inst
.instruction
|= WRITE_BACK
;
7585 /* ARM V6 strex (argument parse). */
7590 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
7591 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
7592 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
7593 || inst
.operands
[2].negative
7594 /* See comment in do_ldrex(). */
7595 || (inst
.operands
[2].reg
== REG_PC
),
7598 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7599 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
7601 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7602 || inst
.reloc
.exp
.X_add_number
!= 0,
7603 _("offset must be zero in ARM encoding"));
7605 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7606 inst
.instruction
|= inst
.operands
[1].reg
;
7607 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7608 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;

/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
   extends it to 32-bits, and adds the result to a value in another
   register.  You can specify a rotation by 0, 8, 16, or 24 bits
   before extracting the 16-bit value.
   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;

   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
   Condition defaults to COND_ALWAYS.
   Error if any register uses R15.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
/* VFP instructions.  In a logical order: SP variant first, monad
   before dyad, arithmetic then move then load/store.  */

do_vfp_sp_monadic (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);

do_vfp_sp_dyadic (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);

do_vfp_sp_compare_z (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);

do_vfp_dp_sp_cvt (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);

do_vfp_sp_dp_cvt (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);

do_vfp_reg_from_sp (void)
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);

do_vfp_reg2_from_sp2 (void)
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);

do_vfp_sp_from_reg (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;

do_vfp_sp2_from_reg2 (void)
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

do_vfp_sp_ldst (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);

do_vfp_dp_ldst (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;

vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)

  inst.instruction |= count;

do_vfp_sp_ldstmia (void)
  vfp_sp_ldstm (VFP_LDSTMIA);

do_vfp_sp_ldstmdb (void)
  vfp_sp_ldstm (VFP_LDSTMDB);

do_vfp_dp_ldstmia (void)
  vfp_dp_ldstm (VFP_LDSTMIA);

do_vfp_dp_ldstmdb (void)
  vfp_dp_ldstm (VFP_LDSTMDB);

do_vfp_xp_ldstmia (void)
  vfp_dp_ldstm (VFP_LDSTMIAX);

do_vfp_xp_ldstmdb (void)
  vfp_dp_ldstm (VFP_LDSTMDBX);
do_vfp_dp_rd_rm (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);

do_vfp_dp_rn_rd (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

do_vfp_dp_rd_rn (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);

do_vfp_dp_rd_rn_rm (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);

do_vfp_dp_rm_rd_rn (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
/* VFPv3 instructions.  */

do_vfp_sp_const (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);

do_vfp_dp_const (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);

vfp_conv (int srcsize)
  unsigned immbits = srcsize - inst.operands[1].imm;
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
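/* Illustrative note (not part of the original source): the fraction-bits
   immediate above is stored as SRCSIZE minus the requested number of
   fraction bits, with its low bit placed at bit 5 of the opcode and the
   remaining bits ORed into bits 0-3.  For example, srcsize == 32 with
   inst.operands[1].imm == 16 gives immbits == 16, so bit 5 receives 0 and
   bits 0-3 receive 8 (16 >> 1).  */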
do_vfp_sp_conv_16 (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);

do_vfp_dp_conv_16 (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);

do_vfp_sp_conv_32 (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);

do_vfp_dp_conv_32 (void)
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
/* FPA instructions.  Also in a logical order.  */

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;

do_fpa_ldmstm (void)
  inst.instruction |= inst.operands[0].reg << 12;
  switch (inst.operands[1].imm)
    case 1: inst.instruction |= CP_T_X; break;
    case 2: inst.instruction |= CP_T_Y; break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;

      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;

  encode_arm_cp_address (2, TRUE, TRUE, 0);
/* iWMMXt instructions: strictly in alphabetical order.  */

do_iwmmxt_tandorc (void)
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));

do_iwmmxt_textrc (void)
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;

do_iwmmxt_textrm (void)
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;

do_iwmmxt_tinsr (void)
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;

do_iwmmxt_tmia (void)
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;

do_iwmmxt_waligni (void)
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;

do_iwmmxt_wmerge (void)
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;

do_iwmmxt_wmov (void)
  /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;

do_iwmmxt_wldstbh (void)
  inst.instruction |= inst.operands[0].reg << 12;
    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
  encode_arm_cp_address (1, TRUE, FALSE, reloc);
do_iwmmxt_wldstw (void)
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);

do_iwmmxt_wldstd (void)
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xf << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    encode_arm_cp_address (1, TRUE, FALSE, 0);

do_iwmmxt_wshufh (void)
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
do_iwmmxt_wzero (void)
  /* WZERO reg is an alias for WANDN reg, reg, reg.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;

do_iwmmxt_wrwrwr_or_imm5 (void)
  if (inst.operands[2].isreg)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    if (inst.operands[2].imm == 0)
	switch ((inst.instruction >> 20) & 0xf)
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
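/* Illustrative note (not part of the original source): the "map 32 -> 0"
   step relies on the shift amount being encoded modulo 32.  A requested
   shift of #32 becomes (32 & 0x1f) == 0; bit 4 of the masked immediate is
   then moved to bit 8 of the opcode ((imm & 0x10) << 4) while bits 0-3 stay
   in place, so for example #20 contributes (0x10 << 4) | 0x4.  */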
/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
   operations first, then control, shift, and load/store.  */

/* Insns like "foo X,Y,Z".  */

do_mav_triple (void)
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;

/* Insns like "foo W,X,Y,Z".
   where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */

  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;

/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */

  inst.instruction |= inst.operands[1].reg << 12;

/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
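/* Illustrative note (not part of the original source): a worked example of
   the remapping above.  For imm == 0x25 (bits 0-3 = 0x5, bits 4-6 = 0x2)
   the expression gives (0x25 & 0xf) | ((0x25 & 0x70) << 1) == 0x05 | 0x40
   == 0x45, i.e. bits 0-3 of the immediate stay in bits 0-3 of the opcode,
   bits 4-6 move up to bits 5-7, and bit 4 of the opcode is left clear.  */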
/* XScale instructions.  Also sorted arithmetic before move.  */

/* Xscale multiply-accumulate (argument parse)
   MIAxycc acc0,Rm,Rs.  */

  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;

/* Xscale move-accumulator-register (argument parse)
   MARcc acc0,RdLo,RdHi.  */

  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;

/* Xscale move-register-accumulator (argument parse)
   MRAcc RdLo,RdHi,acc0.  */

  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

encode_thumb32_shifted_operand (int i)
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;

      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      else if (value == 32)

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
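/* Illustrative note (not part of the original source): the shift amount is
   a 5-bit field split across the Thumb-2 encoding.  Its upper three bits
   (value & 0x1c) are shifted into bits 12-14 and its lower two bits
   (value & 0x03) into bits 6-7, with the shift type itself at bits 4-5.
   For value == 10 this ORs in (8 << 10) and (2 << 6).  */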
/* inst.operands[i] was set up by parse_address.  Encode it into a
   Thumb32 format load or store instruction.  Reject forms that cannot
   be used with such instructions.  If is_t is true, reject forms that
   cannot be used with a T instruction; if is_d is true, reject forms
   that cannot be used with a D instruction.  */

encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
  bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
      constraint (is_pc, _("cannot use register index with PC-relative addressing"));
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	  constraint (inst.reloc.exp.X_op != O_constant,
		      _("expression too complex"));
	  constraint (inst.reloc.exp.X_add_number < 0
		      || inst.reloc.exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.reloc.exp.X_add_number << 4;
      inst.reloc.type = BFD_RELOC_UNUSED;
  else if (inst.operands[i].preind)
      constraint (is_pc && inst.operands[i].writeback,
		  _("cannot use writeback with PC-relative addressing"));
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));

	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;

	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
  else if (inst.operands[i].postind)
      assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

	inst.instruction |= 0x00200000;
      inst.instruction |= 0x00000900;
      inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(adc,   4140, eb400000),			\
  X(adcs,  4140, eb500000),			\
  X(add,   1c00, eb000000),			\
  X(adds,  1c00, eb100000),			\
  X(addi,  0000, f1000000),			\
  X(addis, 0000, f1100000),			\
  X(add_pc,000f, f20f0000),			\
  X(add_sp,000d, f10d0000),			\
  X(adr,   000f, f20f0000),			\
  X(and,   4000, ea000000),			\
  X(ands,  4000, ea100000),			\
  X(asr,   1000, fa40f000),			\
  X(asrs,  1000, fa50f000),			\
  X(b,     e000, f000b000),			\
  X(bcond, d000, f0008000),			\
  X(bic,   4380, ea200000),			\
  X(bics,  4380, ea300000),			\
  X(cmn,   42c0, eb100f00),			\
  X(cmp,   2800, ebb00f00),			\
  X(cpsie, b660, f3af8400),			\
  X(cpsid, b670, f3af8600),			\
  X(cpy,   4600, ea4f0000),			\
  X(dec_sp,80dd, f1ad0d00),			\
  X(eor,   4040, ea800000),			\
  X(eors,  4040, ea900000),			\
  X(inc_sp,00dd, f10d0d00),			\
  X(ldmia, c800, e8900000),			\
  X(ldr,   6800, f8500000),			\
  X(ldrb,  7800, f8100000),			\
  X(ldrh,  8800, f8300000),			\
  X(ldrsb, 5600, f9100000),			\
  X(ldrsh, 5e00, f9300000),			\
  X(ldr_pc,4800, f85f0000),			\
  X(ldr_pc2,4800, f85f0000),			\
  X(ldr_sp,9800, f85d0000),			\
  X(lsl,   0000, fa00f000),			\
  X(lsls,  0000, fa10f000),			\
  X(lsr,   0800, fa20f000),			\
  X(lsrs,  0800, fa30f000),			\
  X(mov,   2000, ea4f0000),			\
  X(movs,  2000, ea5f0000),			\
  X(mul,   4340, fb00f000),			\
  X(muls,  4340, ffffffff), /* no 32b muls */	\
  X(mvn,   43c0, ea6f0000),			\
  X(mvns,  43c0, ea7f0000),			\
  X(neg,   4240, f1c00000), /* rsb #0 */	\
  X(negs,  4240, f1d00000), /* rsbs #0 */	\
  X(orr,   4300, ea400000),			\
  X(orrs,  4300, ea500000),			\
  X(pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(rev,   ba00, fa90f080),			\
  X(rev16, ba40, fa90f090),			\
  X(revsh, bac0, fa90f0b0),			\
  X(ror,   41c0, fa60f000),			\
  X(rors,  41c0, fa70f000),			\
  X(sbc,   4180, eb600000),			\
  X(sbcs,  4180, eb700000),			\
  X(stmia, c000, e8800000),			\
  X(str,   6000, f8400000),			\
  X(strb,  7000, f8000000),			\
  X(strh,  8000, f8200000),			\
  X(str_sp,9000, f84d0000),			\
  X(sub,   1e00, eba00000),			\
  X(subs,  1e00, ebb00000),			\
  X(subi,  8000, f1a00000),			\
  X(subis, 8000, f1b00000),			\
  X(sxtb,  b240, fa4ff080),			\
  X(sxth,  b200, fa0ff080),			\
  X(tst,   4200, ea100f00),			\
  X(uxtb,  b2c0, fa5ff080),			\
  X(uxth,  b280, fa1ff080),			\
  X(nop,   bf00, f3af8000),			\
  X(yield, bf10, f3af8001),			\
  X(wfe,   bf20, f3af8002),			\
  X(wfi,   bf30, f3af8003),			\
  X(sev,   bf40, f3af9004), /* typo, 8004? */

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM_##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
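#if 0
/* Illustrative sketch (not part of the original source): how the lookup
   macros above are meant to be used.  With the table as given,
   THUMB_OP16 (T_MNEM_add) yields 0x1c00, THUMB_OP32 (T_MNEM_adds) yields
   0xeb100000, and THUMB_SETS_FLAGS (T_MNEM_adds) is non-zero because the
   32-bit encoding has bit 20 (the S bit) set.  */
static void
t16_32_tab_example (void)
{
  unsigned short narrow = THUMB_OP16 (T_MNEM_add);	/* 0x1c00 */
  unsigned int wide = THUMB_OP32 (T_MNEM_adds);		/* 0xeb100000 */
  unsigned int sets_flags = THUMB_SETS_FLAGS (T_MNEM_adds);
  (void) narrow; (void) wide; (void) sets_flags;
}
#endif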
/* Thumb instruction encoders, in alphabetical order.  */

do_t_add_sub_w (void)
  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this is the
     SP-{plus,minus}-immediate form of the instruction.  */
  reject_bad_reg (Rd);

  inst.instruction |= (Rn << 16) | (Rd << 8);
  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equalling any of THUMB_OPCODE_add, adds, sub, or subs.  */

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */

      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
	narrow = (current_it_mask == 0);
	narrow = (current_it_mask != 0);
      if (!inst.operands[2].isreg)
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);

	  if (inst.size_req != 4)
	      /* Attempt to use a narrow opcode, with relaxation if
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;

		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
		  if (inst.size_req != 2)
		    inst.relax = opcode;
		constraint (inst.size_req == 2, BAD_HIREG);

	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			      _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.reloc.exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.reloc.exp.X_add_number < 0
			      || inst.reloc.exp.X_add_number > 0xff,
			      _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.reloc.exp.X_add_number;
		  inst.reloc.type = BFD_RELOC_UNUSED;
	      else if (Rs == REG_PC)
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;

		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
		    inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
		    inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;

	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))

		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;

	  constraint (Rd == REG_PC, BAD_PC);
	  constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);

      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),

	  inst.instruction = (inst.instruction == T_MNEM_add
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	  /* Can't do this for SUB.  */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	    inst.instruction |= Rn << 3;
	    inst.instruction |= Rs << 3;
	    constraint (1, _("dest must overlap one source register"));

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
  else if (unified_syntax && inst.size_req != 2)
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.reloc.pc_rel = 1;

      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
      inst.reloc.exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.reloc.pc_rel = 1;

      inst.instruction |= Rd << 4;
/* Arithmetic instructions for which there is just one 16-bit
   instruction encoding, and it allows only two low registers.
   For maximal compatibility with ARM syntax, we allow three register
   operands even when Thumb-32 instructions are not available, as long
   as the first two are identical.  For instance, both "sbc r0,r1" and
   "sbc r0,r0,r1" are allowed.  */

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

      if (!inst.operands[2].isreg)
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	  if (inst.operands[2].shifted)
	  if (inst.size_req == 4)

	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);

      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
/* Similarly, but for instructions where the arithmetic operation is
   commutative, so we can allow either of them to be different from
   the destination operand in a 16-bit instruction.  For instance, all
   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg	/* Rd, Rs, foo */
	: inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

      if (!inst.operands[2].isreg)
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;

	  /* See if we can do this with a 16-bit instruction.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = current_it_mask == 0;
	    narrow = current_it_mask != 0;

	  if (Rd > 7 || Rn > 7 || Rs > 7)
	  if (inst.operands[2].shifted)
	  if (inst.size_req == 4)

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);

      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
	inst.instruction |= Rn << 3;
	inst.instruction |= Rs << 3;
	constraint (1, _("dest must overlap one source register"));
  if (inst.operands[0].present)
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm != 0xf,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    inst.instruction |= 0xf;
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;
  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
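/* Illustrative note (not part of the original source): because the encoding
   stores LSB and MSB rather than LSB and width, "bfc r0, #4, #8" is encoded
   with lsb == 4 and an msb field of 4 + 8 - 1 == 11 in the low bits, while
   the lsb itself is split as above ((lsb & 0x1c) << 10 and (lsb & 0x03)
   << 6).  */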
  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
      Rn = inst.operands[1].reg;
      reject_bad_reg (Rn);

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= msb - 1;
  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
  inst.instruction |= inst.operands[3].imm - 1;
/* ARM V5 Thumb BLX (argument parse)
	BLX <target_addr>	which is BLX(1)
	BLX <Rm>		which is BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
   into inst.instruction.

   ??? How to take advantage of the additional two bits of displacement
   available in Thumb32 mode?  Need new relocation?  */

  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  if (inst.operands[0].isreg)
      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
      /* We have a register, so this is BLX(2).  */
      inst.instruction |= inst.operands[0].reg << 3;

      /* No register.  This must be BLX(1).  */
      inst.instruction = 0xf000e800;
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
      inst.reloc.pc_rel = 1;
  if (current_it_mask)
      /* Conditional branches inside IT blocks are encoded as unconditional
      /* A branch must be the last instruction in an IT block.  */
      constraint (current_it_mask != 0x10, BAD_BRANCH);

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
    opcode = inst.instruction;

  if (unified_syntax && inst.size_req == 4)
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	  assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;

      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
	  inst.instruction |= cond << 8;
	  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;

  inst.reloc.pc_rel = 1;
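/* Illustrative note (not part of the original source): the relocation chosen
   above tracks the encoding that was selected - BRANCH25 for a 32-bit
   unconditional branch, BRANCH20 for a 32-bit conditional branch, and
   BRANCH12/BRANCH9 for the 16-bit unconditional/conditional forms - so the
   later fixup code knows how many displacement bits it has to work with.  */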
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
      constraint (inst.operands[0].imm > 255,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
do_t_branch23 (void)
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
  inst.reloc.pc_rel = 1;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (inst.reloc.exp.X_op == O_symbol
      && inst.reloc.exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
    inst.reloc.exp.X_add_symbol =
      find_real_start (inst.reloc.exp.X_add_symbol);

  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.  The reloc
     should cause the alignment to be checked once it is known.  This is
     because BX PC only works if the instruction is word aligned.  */
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  Rm = inst.operands[0].reg;
  reject_bad_reg (Rm);
  inst.instruction |= Rm << 16;
  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
  constraint (current_it_mask, BAD_NOT_IT);
  inst.instruction |= inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;

      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
/* THUMB CPY instruction (argument parse).  */

  if (inst.size_req == 4)
      inst.instruction = THUMB_OP32 (T_MNEM_mov);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].reg;

      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
      inst.instruction |= (inst.operands[0].reg & 0x7);
      inst.instruction |= inst.operands[1].reg << 3;

  constraint (current_it_mask, BAD_NOT_IT);
  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  inst.instruction |= inst.operands[0].reg;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
  inst.instruction |= inst.operands[0].imm;

  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = (inst.operands[1].present
	? inst.operands[1].reg : Rd);
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (unified_syntax && inst.size_req == 4)
    inst.instruction = THUMB_OP32 (inst.instruction);
    inst.instruction = THUMB_OP16 (inst.instruction);

  unsigned int cond = inst.operands[0].imm;

  constraint (current_it_mask, BAD_NOT_IT);
  current_it_mask = (inst.instruction & 0xf) | 0x10;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
      else if ((mask & 0x1) == 0)

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;

  inst.instruction |= cond << 4;
/* Helper function used for both push/pop and ldm/stm.  */
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");
      if (mask & (1 << 14)
	  && mask & (1 << 15))
	inst.error = _("LR and PC should not both be in register list");

      if ((mask & (1 << base)) != 0
	as_warn (_("base register should not be in register list "
		   "when written back"));

      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");

      if (mask & (1 << base))
	as_warn (_("value stored for r%d is UNPREDICTABLE"), base);

  if ((mask & (mask - 1)) == 0)
      /* Single register transfers implemented as str/ldr.  */
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */

	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */

      inst.instruction |= 0xf8400000;
	inst.instruction |= 0x00100000;

      mask = ffs (mask) - 1;
	inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
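/* Illustrative note (not part of the original source): with a single
   register in the mask, the multiple transfer is rewritten as an ordinary
   Thumb-2 LDR/STR.  For example "pop {r3}" (i.e. ldmia sp!, {r3}) takes the
   "ia!" path above and ends up as ldr r3, [sp], #4, while the non-writeback
   "ia" form degenerates to a plain [base] access.  */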
  /* This really doesn't seem worth it.  */
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7
	      && (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask))))
	      if (inst.instruction == T_MNEM_stmia
		  && (inst.operands[1].imm & mask)
		  && (inst.operands[1].imm & (mask - 1)))
		as_warn (_("value stored for r%d is UNPREDICTABLE"),
			 inst.operands[0].reg);

	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].imm;
	  else if (inst.operands[0].reg == REG_SP
		   && inst.operands[0].writeback)
	      inst.instruction = THUMB_OP16 (inst.instruction == T_MNEM_stmia
					     ? T_MNEM_push : T_MNEM_pop);
	      inst.instruction |= inst.operands[1].imm;

	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
				inst.operands[0].writeback);

      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNPREDICTABLE"),
		     inst.operands[0].reg);

	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;

  if (!inst.operands[1].present)
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed as first register "
		    "when second register is omitted"));
      inst.operands[1].reg = inst.operands[0].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg,

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  inst.instruction |= inst.operands[2].reg << 16;
  unsigned long opcode;

  opcode = inst.instruction;

      if (!inst.operands[1].isreg)
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))

      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && inst.size_req != 4)
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	      inst.instruction = THUMB_OP16 (opcode);
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))

		  if (inst.reloc.pc_rel)
		    opcode = T_MNEM_ldr_pc2;
		    opcode = T_MNEM_ldr_pc;

		  if (opcode == T_MNEM_ldr)
		    opcode = T_MNEM_ldr_sp;
		    opcode = T_MNEM_str_sp;

		  inst.instruction = inst.operands[0].reg << 8;

		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;

	      inst.instruction |= THUMB_OP16 (opcode);
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
		inst.relax = opcode;

      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);

  constraint (inst.operands[0].reg > 7, BAD_HIREG);
  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;

      /* Register offset.  */
      constraint (inst.operands[1].imm > 7, BAD_HIREG);
      constraint (inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));

      switch (inst.instruction)
	case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
	case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
	case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
	case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
	case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
	case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
	case 0x5600 /* ldrsb */:
	case 0x5e00 /* ldrsh */: break;

      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.instruction |= inst.operands[1].imm << 6;
  if (!inst.operands[1].present)
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
		  _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);

  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
  unsigned Rd, Rn, Rm, Ra;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;
  Ra = inst.operands[3].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);
  reject_bad_reg (Ra);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= Ra << 12;

  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (current_it_mask)
	narrow = opcode != T_MNEM_movs;
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  inst.instruction = T2_SUBS_PC_LR;

      if (opcode == T_MNEM_cmp)
	  constraint (Rn == REG_PC, BAD_PC);
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    reject_bad_reg (Rm);
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	  if (inst.operands[1].isreg)
	      if (opcode == T_MNEM_movs)
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
	      else if ((Rn == REG_SP || Rn == REG_PC)
		       && (Rm == REG_SP || Rm == REG_PC))
		reject_bad_reg (Rm);
	    reject_bad_reg (Rn);
      if (!inst.operands[1].isreg)
	  /* Immediate operand.  */
	  if (current_it_mask == 0 && opcode == T_MNEM_mov)
	  if (low_regs && narrow)
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.size_req == 2)
		inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
		inst.relax = opcode;

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (current_it_mask)
	  if (inst.size_req == 4)
	  if (!low_regs || inst.operands[1].imm > 7)

	  switch (inst.operands[1].shift_kind)
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);

	  inst.instruction = opcode;
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
		inst.instruction |= CONDS_BIT;
	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
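	  /* Illustrative note (not part of the original source): this is the
	     path that rewrites a register-shifted move as a stand-alone
	     shift instruction, e.g. "mov r0, r1, lsl r2" is assembled as
	     "lsl r0, r1, r2"; for "movs" the narrow encoding additionally
	     sets CONDS_BIT so that the shift updates the flags.  */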
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	      if (current_it_mask)
		narrow = (inst.instruction == T_MNEM_mov);
		narrow = (inst.instruction == T_MNEM_movs);

	      switch (inst.operands[1].shift_kind)
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;

	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);

	switch (inst.instruction)
	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;

	    /* We know we have low registers at this point.
	       Generate ADD Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_ADD_I3;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;

		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;

		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;

      inst.instruction = THUMB_OP16 (inst.instruction);
      if (inst.operands[1].isreg)
	  if (Rn < 8 && Rm < 8)
	      /* A move of two lowregs is encoded as ADD Rd, Rs, #0
		 since a MOV instruction produces unpredictable results.  */
	      if (inst.instruction == T_OPCODE_MOV_I8)
		inst.instruction = T_OPCODE_ADD_I3;
		inst.instruction = T_OPCODE_CMP_LR;

	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;

	      if (inst.instruction == T_OPCODE_MOV_I8)
		inst.instruction = T_OPCODE_MOV_HR;
		inst.instruction = T_OPCODE_CMP_HR;

		      _("only lo regs allowed with immediate"));
	  inst.instruction |= Rn << 8;
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
static void
do_t_mov16 (void)
{
  unsigned Rd;
  unsigned int imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
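
/* Worked example (illustrative comment, not from the original source): for
   "movw r0, #0x1234" the four mask-and-shift lines above split the 16-bit
   value 0x1234 into imm4:i:imm3:imm8 = 0x1 : 0 : 0x2 : 0x34 and place the
   pieces at bits 19:16, 26, 14:12 and 7:0 of the T2 encoding.  */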
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
static void
do_t_mrs (void)
{
  unsigned Rd;
  int flags;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		  _("selected processor does not support "
		    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		  _("selected processor does not support "
		    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}
static void
do_t_msr (void)
{
  unsigned Rn;
  int flags;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;

  if (flags & ~0xff)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
		_("selected processor does not support "
		  "requested special purpose register"));
  else
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
		_("selected processor does not support "
		  "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
static void
do_t_mull (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;

  if (RdLo == RdHi)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
static void
do_t_nop (void)
{
  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
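
/* Note (added for clarity): 0x46c0 is the 16-bit encoding of "mov r8, r8",
   the conventional NOP on Thumb-1 cores that lack the Thumb-2 NOP/hint
   instructions checked for above.  */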
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      if (!narrow)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg << 8;
	  inst.instruction |= inst.operands[1].reg << 16;
	}
      else
	{
	  inst.instruction = THUMB_OP16 (inst.instruction);
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
		  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
		  && inst.operands[2].immisreg,
		  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}

static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    inst.instruction &= ~0x00000020;
static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
	    && (mask & ~0xff) == 1 << REG_LR)
	   || (inst.instruction == T_MNEM_pop
	       && (mask & ~0xff) == 1 << REG_PC))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
static void
do_t_rbit (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
}
static void
do_t_rev (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (Rd <= 7 && Rm <= 7
      && inst.size_req != 4)
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm << 16;
      inst.instruction |= Rm;
    }
  else
    inst.error = BAD_HIREG;
}
static void
do_t_rrx (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm;
}
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      if ((inst.instruction & 0x00100000) != 0)
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      if (inst.reloc.exp.X_op != O_constant
	  || inst.reloc.exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.reloc.type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
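
/* Example (illustrative): in unified syntax "rsbs r0, r1, #0" passes all of
   the narrow checks above and is rewritten as the 16-bit NEG encoding
   (THUMB_OP16 (T_MNEM_negs)), whereas "rsb r0, r1, #4" keeps the 32-bit T32
   immediate form.  */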
static void
do_t_setend (void)
{
  constraint (current_it_mask, BAD_NOT_IT);
  if (inst.operands[0].imm)
    inst.instruction |= 0x8;
}
static void
do_t_shift (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = (current_it_mask == 0);
      else
	narrow = (current_it_mask != 0);
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;
	    }
	  else
	    {
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.reloc.type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;
	    }
	  else
	    {
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
static void
do_t_simd (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
static void
do_t_smc (void)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  constraint (inst.reloc.exp.X_op != O_constant,
	      _("expression too complex"));
  inst.reloc.type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0xf000) >> 12;
  inst.instruction |= (value & 0x0ff0);
  inst.instruction |= (value & 0x000f) << 16;
}
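
/* Example (illustrative): for "smc #0x1234" the code above scatters the
   16-bit immediate into three fields: the top nibble (0x1) into bits 3:0,
   the middle byte (0x23) left in bits 11:4, and the low nibble (0x4) into
   bits 19:16.  */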
static void
do_t_ssat (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit */
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
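
/* Note (added for clarity): SSAT encodes the saturation width minus one, so
   "ssat r0, #16, r1" stores 15 in the immediate field -- hence the
   "inst.operands[1].imm - 1" above.  USAT, further down, uses the operand
   value directly.  */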
static void
do_t_ssat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - 1;
  inst.instruction |= Rn << 16;
}
static void
do_t_strex (void)
{
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
static void
do_t_strexd (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[1].reg + 1;

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg
	      || inst.operands[0].reg == inst.operands[3].reg
	      || inst.operands[1].reg == inst.operands[2].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 16;
}
static void
do_t_sxtah (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= inst.operands[3].imm << 4;
}
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  if (inst.instruction <= 0xffff && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
static void
do_t_swi (void)
{
  inst.reloc.type = BFD_RELOC_ARM_SWI;
}
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
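
/* Example (illustrative): "tbb [r0, r1]" encodes Rn=r0 and Rm=r1 with no
   index shift, while "tbh [r0, r1, lsl #1]" is the halfword variant; the
   shifted-index constraint above is therefore only enforced for the byte
   form.  */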
static void
do_t_usat (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));
      if (inst.reloc.exp.X_add_number != 0)
	{
	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit */

	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
	}
      inst.reloc.type = BFD_RELOC_UNUSED;
    }
}
static void
do_t_usat16 (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
}
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB \
  X(vabd, 0x0000700, 0x1200d00, N_INV), \
  X(vmax, 0x0000600, 0x0000f00, N_INV), \
  X(vmin, 0x0000610, 0x0200f00, N_INV), \
  X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
  X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
  X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
  X(vadd, 0x0000800, 0x0000d00, N_INV), \
  X(vsub, 0x1000800, 0x0200d00, N_INV), \
  X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
  X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
  X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */ \
  X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
  X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
  X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
  X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
  X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
  X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal, 0x0800800, N_INV, 0x0800240), \
  X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
  X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
  X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
  X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
  X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
  X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
  X(vshl, 0x0000400, N_INV, 0x0800510), \
  X(vqshl, 0x0000410, N_INV, 0x0800710), \
  X(vand, 0x0000110, N_INV, 0x0800030), \
  X(vbic, 0x0100110, N_INV, 0x0800030), \
  X(veor, 0x1000110, N_INV, N_INV), \
  X(vorn, 0x0300110, N_INV, 0x0800010), \
  X(vorr, 0x0200110, N_INV, 0x0800010), \
  X(vmvn, 0x1b00580, N_INV, 0x0800030), \
  X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate.  */ \
  X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point.  */ \
  X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar.  */ \
  X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1, 0x0000000, 0x0800000, N_INV), \
  X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
  X(vst2, 0x0000100, 0x0800100, N_INV), \
  X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
  X(vst3, 0x0000200, 0x0800200, N_INV), \
  X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
  X(vst4, 0x0000300, 0x0800300, N_INV), \
  X(vmovn, 0x1b20200, N_INV, N_INV), \
  X(vtrn, 0x1b20080, N_INV, N_INV), \
  X(vqmovn, 0x1b20200, N_INV, N_INV), \
  X(vqmovun, 0x1b20240, N_INV, N_INV), \
  X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
  X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
  X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
  X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
  X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
  X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
  X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)

enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
  NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
  NEON_ENC_TAB
#undef X
};

#define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
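
/* Illustrative expansion (added comment): with the X() definitions above,
   NEON_ENC_INTEGER (N_MNEM_vadd) picks the .integer field of the vadd row,
   i.e. 0x0000800, while NEON_ENC_FLOAT (N_MNEM_vadd) yields the float/poly
   encoding 0x0000d00 from the same row.  */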
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF \
  X(3, (D, D, D), DOUBLE), \
  X(3, (Q, Q, Q), QUAD), \
  X(3, (D, D, I), DOUBLE), \
  X(3, (Q, Q, I), QUAD), \
  X(3, (D, D, S), DOUBLE), \
  X(3, (Q, Q, S), QUAD), \
  X(2, (D, D), DOUBLE), \
  X(2, (Q, Q), QUAD), \
  X(2, (D, S), DOUBLE), \
  X(2, (Q, S), QUAD), \
  X(2, (D, R), DOUBLE), \
  X(2, (Q, R), QUAD), \
  X(2, (D, I), DOUBLE), \
  X(2, (Q, I), QUAD), \
  X(3, (D, L, D), DOUBLE), \
  X(2, (D, Q), MIXED), \
  X(2, (Q, D), MIXED), \
  X(3, (D, Q, I), MIXED), \
  X(3, (Q, D, I), MIXED), \
  X(3, (Q, D, D), MIXED), \
  X(3, (D, Q, Q), MIXED), \
  X(3, (Q, Q, D), MIXED), \
  X(3, (Q, D, S), MIXED), \
  X(3, (D, Q, S), MIXED), \
  X(4, (D, D, D, I), DOUBLE), \
  X(4, (Q, Q, Q, I), QUAD), \
  X(2, (F, F), SINGLE), \
  X(3, (F, F, F), SINGLE), \
  X(2, (F, I), SINGLE), \
  X(2, (F, D), MIXED), \
  X(2, (D, F), MIXED), \
  X(3, (F, F, I), MIXED), \
  X(4, (R, R, F, F), SINGLE), \
  X(4, (F, F, R, R), SINGLE), \
  X(3, (D, R, R), DOUBLE), \
  X(3, (R, R, D), DOUBLE), \
  X(2, (S, R), SINGLE), \
  X(2, (R, S), SINGLE), \
  X(2, (F, R), SINGLE), \
  X(2, (R, F), SINGLE)
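
/* Illustrative expansion (added comment): the S3/X definitions below turn an
   entry such as X(3, (D, D, D), DOUBLE) into the enumerator NS_DDD, and the
   later redefinitions of X reuse the same list to build neon_shape_class[]
   and neon_shape_tab[].  */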
#define S2(A,B)     NS_##A##B
#define S3(A,B,C)   NS_##A##B##C
#define S4(A,B,C,D) NS_##A##B##C##D

#define X(N, L, C) S##N L

enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4

enum neon_shape_class
{
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X

enum neon_shape_el
{
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L
};

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
{
  32,
  64,
  128,
  0,
  32,
  32,
  0
};

struct neon_shape_info
{
  int els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)     { SE_##A, SE_##B }
#define S3(A,B,C)   { SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* key element (main type specifier).  */
  N_EQK  = 0x2000000, /* given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x0000001, /* if N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* if N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* if N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* if N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* if N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* if N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* if N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_L:
	      break;
	    }
	}
      if (matches)
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}

static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
11314 static enum neon_type_mask
11315 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
11322 case 8: return N_8
;
11323 case 16: return N_16
;
11324 case 32: return N_32
;
11325 case 64: return N_64
;
11333 case 8: return N_I8
;
11334 case 16: return N_I16
;
11335 case 32: return N_I32
;
11336 case 64: return N_I64
;
11344 case 16: return N_F16
;
11345 case 32: return N_F32
;
11346 case 64: return N_F64
;
11354 case 8: return N_P8
;
11355 case 16: return N_P16
;
11363 case 8: return N_S8
;
11364 case 16: return N_S16
;
11365 case 32: return N_S32
;
11366 case 64: return N_S64
;
11374 case 8: return N_U8
;
11375 case 16: return N_U16
;
11376 case 32: return N_U32
;
11377 case 64: return N_U64
;
/* Convert compact Neon bitmask type representation to a type and size. Only
   handles the case where a single bit is set in the mask.  */

static int
el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
		     enum neon_type_mask mask)
{
  if ((mask & N_EQK) != 0)
    return FAIL;

  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
    *size = 8;
  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
    *size = 16;
  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
    *size = 32;
  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
    *size = 64;
  else
    return FAIL;

  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
    *type = NT_signed;
  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
    *type = NT_unsigned;
  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
    *type = NT_integer;
  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
    *type = NT_untyped;
  else if ((mask & (N_P8 | N_P16)) != 0)
    *type = NT_poly;
  else if ((mask & (N_F32 | N_F64)) != 0)
    *type = NT_float;
  else
    return FAIL;

  return SUCCESS;
}
/* Modify a bitmask of allowed types. This is only needed for type
   relaxation.  */

static unsigned
modify_types_allowed (unsigned allowed, unsigned mods)
{
  unsigned size;
  enum neon_el_type type;
  unsigned destmask;
  int i;

  destmask = 0;

  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
    {
      if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
	{
	  neon_modify_type_size (mods, &type, &size);
	  destmask |= type_chk_of_el_type (type, size);
	}
    }

  return destmask;
}
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type of:
   - the second operand, if there is one
   - the operand, if there is no second operand
   - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands). All
   Neon instructions should call it before performing bit encoding.  */
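
/* Usage sketch (added comment, taken from the callers further down): a
   typical invocation is
     et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
   which marks operand 2 as the key (any signed/unsigned 8/16/32-bit type)
   and requires operands 0 and 1 to have the same type and size as it.  */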
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand.  */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if ((g_size == 8 && (types_allowed & N_8) != 0)
	      || (g_size == 16 && (types_allowed & N_16) != 0)
	      || (g_size == 32 && (types_allowed & N_32) != 0)
	      || (g_size == 64 && (types_allowed & N_64) != 0))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
		  unsigned regwidth = neon_shape_el_size[regshape], match;

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (_("bad type in Neon instruction"));
		      return badtype;
		    }
		}
	      else
		{
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
/* Neon-style VFP instruction forwarding.  */

/* Thumb VFP instructions have 0xE in the condition field.  */

static void
do_vfp_cond_or_thumb (void)
{
  if (thumb_mode)
    inst.instruction |= 0xe0000000;
  else
    inst.instruction |= inst.cond << 28;
}
/* Look up and encode a simple mnemonic, for use as a helper function for the
   Neon-style VFP syntax. This avoids duplication of bits of the insns table,
   etc. It is assumed that operand parsing has already been done, and that the
   operands are in the form expected by the given opcode (this isn't necessarily
   the same as the form in which they were parsed, hence some massaging must
   take place before this function is called).
   Checks current arch version against that in the looked-up opcode.  */

static void
do_vfp_nsyn_opcode (const char *opname)
{
  const struct asm_opcode *opcode;

  opcode = hash_find (arm_ops_hsh, opname);

  if (!opcode)
    abort ();

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
		thumb_mode ? *opcode->tvariant : *opcode->avariant),
	      BAD_FPU);

  if (thumb_mode)
    {
      inst.instruction = opcode->tvalue;
      opcode->tencode ();
    }
  else
    {
      inst.instruction = (inst.cond << 28) | opcode->avalue;
      opcode->aencode ();
    }
}
static void
do_vfp_nsyn_add_sub (enum neon_shape rs)
{
  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;

  if (rs == NS_FFF)
    {
      if (is_add)
	do_vfp_nsyn_opcode ("fadds");
      else
	do_vfp_nsyn_opcode ("fsubs");
    }
  else
    {
      if (is_add)
	do_vfp_nsyn_opcode ("faddd");
      else
	do_vfp_nsyn_opcode ("fsubd");
    }
}
/* Check operand types to see if this is a VFP instruction, and if so call
   do_vfp_nsyn_opcode ().  */

static int
try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
{
  enum neon_shape rs;
  struct neon_type_el et;

  switch (args)
    {
    case 2:
      rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      et = neon_check_type (2, rs,
			    N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    case 3:
      rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
      et = neon_check_type (3, rs,
			    N_EQK | N_VFP, N_EQK | N_VFP,
			    N_F32 | N_F64 | N_KEY | N_VFP);
      break;

    default:
      abort ();
    }

  if (et.type != NT_invtype)
    {
      pfn (rs);
      return SUCCESS;
    }

  return FAIL;
}
static void
do_vfp_nsyn_mla_mls (enum neon_shape rs)
{
  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;

  if (rs == NS_FFF)
    {
      if (is_mla)
	do_vfp_nsyn_opcode ("fmacs");
      else
	do_vfp_nsyn_opcode ("fmscs");
    }
  else
    {
      if (is_mla)
	do_vfp_nsyn_opcode ("fmacd");
      else
	do_vfp_nsyn_opcode ("fmscd");
    }
}

static void
do_vfp_nsyn_mul (enum neon_shape rs)
{
  if (rs == NS_FFF)
    do_vfp_nsyn_opcode ("fmuls");
  else
    do_vfp_nsyn_opcode ("fmuld");
}

static void
do_vfp_nsyn_abs_neg (enum neon_shape rs)
{
  int is_neg = (inst.instruction & 0x80) != 0;
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);

  if (rs == NS_FF)
    {
      if (is_neg)
	do_vfp_nsyn_opcode ("fnegs");
      else
	do_vfp_nsyn_opcode ("fabss");
    }
  else
    {
      if (is_neg)
	do_vfp_nsyn_opcode ("fnegd");
      else
	do_vfp_nsyn_opcode ("fabsd");
    }
}
/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
   insns belong to Neon, and are handled elsewhere.  */

static void
do_vfp_nsyn_ldm_stm (int is_dbmode)
{
  int is_ldm = (inst.instruction & (1 << 20)) != 0;

  if (is_ldm)
    {
      if (is_dbmode)
	do_vfp_nsyn_opcode ("fldmdbs");
      else
	do_vfp_nsyn_opcode ("fldmias");
    }
  else
    {
      if (is_dbmode)
	do_vfp_nsyn_opcode ("fstmdbs");
      else
	do_vfp_nsyn_opcode ("fstmias");
    }
}

static void
do_vfp_nsyn_sqrt (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
  neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FF)
    do_vfp_nsyn_opcode ("fsqrts");
  else
    do_vfp_nsyn_opcode ("fsqrtd");
}
static void
do_vfp_nsyn_div (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    do_vfp_nsyn_opcode ("fdivs");
  else
    do_vfp_nsyn_opcode ("fdivd");
}

static void
do_vfp_nsyn_nmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
		   N_F32 | N_F64 | N_KEY | N_VFP);

  if (rs == NS_FFF)
    {
      inst.instruction = NEON_ENC_SINGLE (inst.instruction);
      do_vfp_sp_dyadic ();
    }
  else
    {
      inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
      do_vfp_dp_rd_rn_rm ();
    }
  do_vfp_cond_or_thumb ();
}
static void
do_vfp_nsyn_cmp (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);

      if (rs == NS_FF)
	{
	  inst.instruction = NEON_ENC_SINGLE (inst.instruction);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI)
	{
	  inst.instruction = NEON_ENC_SINGLE (inst.instruction);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();
}
static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}

static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();
  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmias");
  else
    do_vfp_nsyn_opcode ("fldmiad");
}
/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */

static unsigned
neon_dp_fixup (unsigned i)
{
  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
	i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  return i;
}

/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}

#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
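
/* Examples (illustrative): neon_logbits (32) == 2, which is the value the
   size field expects for 32-bit elements; for register d17, LOW4 (17) == 1
   supplies the four-bit register number and HI1 (17) == 1 the extension
   bit.  */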
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= (ubit != 0) << 24;
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
11999 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12000 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12002 Don't write size if SIZE == -1. */
12005 neon_two_same (int qbit
, int ubit
, int size
)
12007 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12008 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12009 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12010 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12011 inst
.instruction
|= (qbit
!= 0) << 6;
12012 inst
.instruction
|= (ubit
!= 0) << 24;
12015 inst
.instruction
|= neon_logbits (size
) << 18;
12017 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12020 /* Neon instruction encoders, in approximate order of appearance. */
12023 do_neon_dyadic_i_su (void)
12025 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12026 struct neon_type_el et
= neon_check_type (3, rs
,
12027 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
12028 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
12032 do_neon_dyadic_i64_su (void)
12034 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
12035 struct neon_type_el et
= neon_check_type (3, rs
,
12036 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
12037 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
static void
do_neon_rshl (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  unsigned int tmp;

  tmp = inst.operands[2].reg;
  inst.operands[2].reg = inst.operands[1].reg;
  inst.operands[1].reg = tmp;
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}

/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
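
/* Example (illustrative): 1.0f is 0x3f800000, so neon_qfloat_bits returns
   ((0x3f800000 >> 19) & 0x7f) | ((0x3f800000 >> 24) & 0x80) = 0x70, the
   eight-bit "quarter float" immediate form.  */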
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN). If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}

/* Invert low-order SIZE bits of XHI:XLO.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
        return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
        {
          /* .i64 is a pseudo-op, so the immediate must be a repeating
             pattern.  */
          if (immbits != (inst.operands[1].regisimm ?
                          inst.operands[1].reg : 0))
            {
              /* Set immbits to an invalid constant.  */
              immbits = 0xdeadbeef;
            }
        }

      switch (opcode)
        {
        case N_MNEM_vbic:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorr:
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vand:
          /* Pseudo-instruction for VBIC.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        case N_MNEM_vorn:
          /* Pseudo-instruction for VORR.  */
          neon_invert_size (&immbits, 0, et.size);
          cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
          break;

        default:
          abort ();
        }

      if (cmode == FAIL)
        return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
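/* Because of the inversion above, an immediate form such as
   "vand.i32 d0, #0xffffff00" is assembled as VBIC.I32 d0, #0xff, and
   "vorn.i32 d0, #0xffffff00" as VORR.I32 d0, #0xff.  */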
static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
                  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
                                            types | N_KEY);
  if (et.type == NT_float)
    {
      inst.instruction = NEON_ENC_FLOAT (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2
};

/* Call this function if an instruction may have belonged to either the VFP
   or Neon instruction sets, but turned out to be a Neon instruction (due to
   the operand types involved, etc.).  We have to check and/or fix up a
   couple of things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
        {
          first_error (_(BAD_COND));
          return FAIL;
        }
      if (inst.uncond_value != -1)
        inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
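/* For example, in ARM state a conditional Neon arithmetic instruction such
   as "vaddeq.i32 q0, q1, q2" is rejected here with BAD_COND, whereas the
   same mnemonic is accepted inside an IT block in Thumb state because the
   condition check above is skipped when thumb_mode is set.  */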
static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   result to be:
     V<op> A,B      (A is operand 0, B is operand 2)
   to mean:
     V<op> A,B,A
   not:
     V<op> A,B,B
   so handle that case specially.  */

static void
neon_exchange_operands (void)
{
  void *scratch = alloca (sizeof (inst.operands[0]));
  if (inst.operands[1].present)
    {
      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
    }
  else
    {
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
        neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
        N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
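/* Example encodings: the 16-bit scalar d7[3] packs to 7 | (3 << 3) = 0x1f,
   and the 32-bit scalar d15[1] packs to 15 | (1 << 4) = 0x1f; registers or
   indices beyond those limits are diagnosed above.  */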
/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
         affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}

/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
        N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
              _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
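/* A #0 shift, e.g. "vqshrn.s32 d0, q1, #0", is therefore routed to the
   VQMOVN handler above, and the unsigned-result form likewise becomes
   VQMOVUN.  */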
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
        N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
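/* For example, "vshll.i16 q0, d1, #16" (shift equal to the element size)
   takes the maximum-shift encoding above, while any smaller shift is
   re-checked against N_SU_32 and encoded as an ordinary immediate shift.  */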
13012 /* Check the various types for the VCVT instruction, and return which version
13013 the current instruction is. */
13016 neon_cvt_flavour (enum neon_shape rs
)
13018 #define CVT_VAR(C,X,Y) \
13019 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
13020 if (et.type != NT_invtype) \
13022 inst.error = NULL; \
13025 struct neon_type_el et
;
13026 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
13027 || rs
== NS_FF
) ? N_VFP
: 0;
13028 /* The instruction versions which take an immediate take one register
13029 argument, which is extended to the width of the full register. Thus the
13030 "source" and "destination" registers must have the same width. Hack that
13031 here by making the size equal to the key (wider, in this case) operand. */
13032 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
13034 CVT_VAR (0, N_S32
, N_F32
);
13035 CVT_VAR (1, N_U32
, N_F32
);
13036 CVT_VAR (2, N_F32
, N_S32
);
13037 CVT_VAR (3, N_F32
, N_U32
);
13038 /* Half-precision conversions. */
13039 CVT_VAR (4, N_F32
, N_F16
);
13040 CVT_VAR (5, N_F16
, N_F32
);
13044 /* VFP instructions. */
13045 CVT_VAR (6, N_F32
, N_F64
);
13046 CVT_VAR (7, N_F64
, N_F32
);
13047 CVT_VAR (8, N_S32
, N_F64
| key
);
13048 CVT_VAR (9, N_U32
, N_F64
| key
);
13049 CVT_VAR (10, N_F64
| key
, N_S32
);
13050 CVT_VAR (11, N_F64
| key
, N_U32
);
13051 /* VFP instructions with bitshift. */
13052 CVT_VAR (12, N_F32
| key
, N_S16
);
13053 CVT_VAR (13, N_F32
| key
, N_U16
);
13054 CVT_VAR (14, N_F64
| key
, N_S16
);
13055 CVT_VAR (15, N_F64
| key
, N_U16
);
13056 CVT_VAR (16, N_S16
, N_F32
| key
);
13057 CVT_VAR (17, N_U16
, N_F32
| key
);
13058 CVT_VAR (18, N_S16
, N_F64
| key
);
13059 CVT_VAR (19, N_U16
, N_F64
| key
);
13065 /* Neon-syntax VFP conversions. */
13068 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
13070 const char *opname
= 0;
13072 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
13074 /* Conversions with immediate bitshift. */
13075 const char *enc
[] =
13099 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13101 opname
= enc
[flavour
];
13102 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13103 _("operands 0 and 1 must be the same register"));
13104 inst
.operands
[1] = inst
.operands
[2];
13105 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
13110 /* Conversions without bitshift. */
13111 const char *enc
[] =
13127 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13128 opname
= enc
[flavour
];
13132 do_vfp_nsyn_opcode (opname
);
13136 do_vfp_nsyn_cvtz (void)
13138 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
13139 int flavour
= neon_cvt_flavour (rs
);
13140 const char *enc
[] =
13154 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
13155 do_vfp_nsyn_opcode (enc
[flavour
]);
13161 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
13162 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
13163 int flavour
= neon_cvt_flavour (rs
);
13165 /* VFP rather than Neon conversions. */
13168 do_vfp_nsyn_cvt (rs
, flavour
);
13178 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13180 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13183 /* Fixed-point conversion with #0 immediate is encoded as an
13184 integer conversion. */
13185 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
13187 immbits
= 32 - inst
.operands
[2].imm
;
13188 inst
.instruction
= NEON_ENC_IMMED (inst
.instruction
);
13190 inst
.instruction
|= enctab
[flavour
];
13191 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13192 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13193 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13194 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13195 inst
.instruction
|= neon_quad (rs
) << 6;
13196 inst
.instruction
|= 1 << 21;
13197 inst
.instruction
|= immbits
<< 16;
13199 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13207 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
13209 inst
.instruction
= NEON_ENC_INTEGER (inst
.instruction
);
13211 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13215 inst
.instruction
|= enctab
[flavour
];
13217 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13218 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13219 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13220 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13221 inst
.instruction
|= neon_quad (rs
) << 6;
13222 inst
.instruction
|= 2 << 18;
13224 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13228 /* Half-precision conversions for Advanced SIMD -- neon. */
13233 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
13235 as_bad (_("operand size must match register width"));
13240 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
13242 as_bad (_("operand size must match register width"));
13247 inst
.instruction
= 0x3b60600;
13249 inst
.instruction
= 0x3b60700;
13251 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13252 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13253 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13254 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13255 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13259 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
13260 do_vfp_nsyn_cvt (rs
, flavour
);
static void
do_neon_cvtb (void)
{
  inst.instruction = 0xeb20a40;

  /* The sizes are attached to the mnemonic.  */
  if (inst.vectype.el[0].type != NT_invtype
      && inst.vectype.el[0].size == 16)
    inst.instruction |= 0x00010000;

  /* Programmer's syntax: the sizes are attached to the operands.  */
  else if (inst.operands[0].vectype.type != NT_invtype
           && inst.operands[0].vectype.size == 16)
    inst.instruction |= 0x00010000;

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
  do_vfp_cond_or_thumb ();
}

static void
do_neon_cvtt (void)
{
  do_neon_cvtb ();
  inst.instruction |= 0x80;
}
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
              _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
              _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
                                        et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
         with one or the other; those cases are caught by
         neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
                                            &op, et.size, et.type)) == FAIL)
        {
          first_error (_("immediate out of range"));
          return;
        }
    }

  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}

static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_move_immediate ();
    }

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D |size |  Rn |  Rd |x x x x|N|x|M|x| Rm |  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
        N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
        N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
        inst.instruction = NEON_ENC_POLY (inst.instruction);
      else
        inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
         zero. Should be OK as-is.  */
      neon_mixed_length (et, et.size);
    }
}
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
              _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  assert (elsize != 0);
  constraint (et.size >= elsize,
              _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
13515 if (inst
.operands
[1].isscalar
)
13517 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
13518 struct neon_type_el et
= neon_check_type (2, rs
,
13519 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13520 unsigned sizebits
= et
.size
>> 3;
13521 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
13522 int logsize
= neon_logbits (et
.size
);
13523 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
13525 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
13528 inst
.instruction
= NEON_ENC_SCALAR (inst
.instruction
);
13529 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13530 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13531 inst
.instruction
|= LOW4 (dm
);
13532 inst
.instruction
|= HI1 (dm
) << 5;
13533 inst
.instruction
|= neon_quad (rs
) << 6;
13534 inst
.instruction
|= x
<< 17;
13535 inst
.instruction
|= sizebits
<< 16;
13537 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13541 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
13542 struct neon_type_el et
= neon_check_type (2, rs
,
13543 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
13544 /* Duplicate ARM register to lanes of vector. */
13545 inst
.instruction
= NEON_ENC_ARMREG (inst
.instruction
);
13548 case 8: inst
.instruction
|= 0x400000; break;
13549 case 16: inst
.instruction
|= 0x000020; break;
13550 case 32: inst
.instruction
|= 0x000000; break;
13553 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
13554 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
13555 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
13556 inst
.instruction
|= neon_quad (rs
) << 21;
13557 /* The encoding for this instruction is identical for the ARM and Thumb
13558 variants, except for the condition field. */
13559 do_vfp_cond_or_thumb ();
/* VMOV has particularly many variations. It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)
     8. VMOV.F32 <Sd>, <Sm>
     9. VMOV.F64 <Dd>, <Dm>
   (VFP register moves.)
     10. VMOV.F32 <Sd>, #imm
     11. VMOV.F64 <Dd>, #imm
   (VFP float immediate load.)
     12. VMOV <Rd>, <Sm>
   (VFP single to ARM reg.)
     13. VMOV <Sd>, <Rm>
   (ARM reg to VFP single.)
     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   (Two ARM regs to two VFP singles.)
     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   (Two VFP singles to two ARM regs.)

   These cases can be disambiguated using neon_select_shape, except cases 1/9
   and 3/11 which depend on the operand type too.

   All the encoded bits are hardcoded by this function.

   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   Cases 5, 7 may be used with VFPv2 and above.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).  */
13607 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
13608 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
13610 struct neon_type_el et
;
13611 const char *ldconst
= 0;
13615 case NS_DD
: /* case 1/9. */
13616 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
13617 /* It is not an error here if no type is given. */
13619 if (et
.type
== NT_float
&& et
.size
== 64)
13621 do_vfp_nsyn_opcode ("fcpyd");
13624 /* fall through. */
13626 case NS_QQ
: /* case 0/1. */
13628 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13630 /* The architecture manual I have doesn't explicitly state which
13631 value the U bit should have for register->register moves, but
13632 the equivalent VORR instruction has U = 0, so do that. */
13633 inst
.instruction
= 0x0200110;
13634 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13635 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13636 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13637 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13638 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
13639 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
13640 inst
.instruction
|= neon_quad (rs
) << 6;
13642 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13646 case NS_DI
: /* case 3/11. */
13647 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
13649 if (et
.type
== NT_float
&& et
.size
== 64)
13651 /* case 11 (fconstd). */
13652 ldconst
= "fconstd";
13653 goto encode_fconstd
;
13655 /* fall through. */
13657 case NS_QI
: /* case 2/3. */
13658 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13660 inst
.instruction
= 0x0800010;
13661 neon_move_immediate ();
13662 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
13665 case NS_SR
: /* case 4. */
13667 unsigned bcdebits
= 0;
13668 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
13669 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
13670 int logsize
= neon_logbits (et
.size
);
13671 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
13672 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
13674 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
13676 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
13677 && et
.size
!= 32, _(BAD_FPU
));
13678 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
13679 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
13683 case 8: bcdebits
= 0x8; break;
13684 case 16: bcdebits
= 0x1; break;
13685 case 32: bcdebits
= 0x0; break;
13689 bcdebits
|= x
<< logsize
;
13691 inst
.instruction
= 0xe000b10;
13692 do_vfp_cond_or_thumb ();
13693 inst
.instruction
|= LOW4 (dn
) << 16;
13694 inst
.instruction
|= HI1 (dn
) << 7;
13695 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13696 inst
.instruction
|= (bcdebits
& 3) << 5;
13697 inst
.instruction
|= (bcdebits
>> 2) << 21;
13701 case NS_DRR
: /* case 5 (fmdrr). */
13702 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
13705 inst
.instruction
= 0xc400b10;
13706 do_vfp_cond_or_thumb ();
13707 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
13708 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
13709 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13710 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13713 case NS_RS
: /* case 6. */
13715 struct neon_type_el et
= neon_check_type (2, NS_NULL
,
13716 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
13717 unsigned logsize
= neon_logbits (et
.size
);
13718 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
13719 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
13720 unsigned abcdebits
= 0;
13722 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
13724 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
13725 && et
.size
!= 32, _(BAD_FPU
));
13726 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
13727 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
13731 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
13732 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
13733 case 32: abcdebits
= 0x00; break;
13737 abcdebits
|= x
<< logsize
;
13738 inst
.instruction
= 0xe100b10;
13739 do_vfp_cond_or_thumb ();
13740 inst
.instruction
|= LOW4 (dn
) << 16;
13741 inst
.instruction
|= HI1 (dn
) << 7;
13742 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
13743 inst
.instruction
|= (abcdebits
& 3) << 5;
13744 inst
.instruction
|= (abcdebits
>> 2) << 21;
13748 case NS_RRD
: /* case 7 (fmrrd). */
13749 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
13752 inst
.instruction
= 0xc500b10;
13753 do_vfp_cond_or_thumb ();
13754 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
13755 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13756 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
13757 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
13760 case NS_FF
: /* case 8 (fcpys). */
13761 do_vfp_nsyn_opcode ("fcpys");
13764 case NS_FI
: /* case 10 (fconsts). */
13765 ldconst
= "fconsts";
13767 if (is_quarter_float (inst
.operands
[1].imm
))
13769 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
13770 do_vfp_nsyn_opcode (ldconst
);
13773 first_error (_("immediate out of range"));
13776 case NS_RF
: /* case 12 (fmrs). */
13777 do_vfp_nsyn_opcode ("fmrs");
13780 case NS_FR
: /* case 13 (fmsr). */
13781 do_vfp_nsyn_opcode ("fmsr");
13784 /* The encoders for the fmrrs and fmsrr instructions expect three operands
13785 (one of which is a list), but we have parsed four. Do some fiddling to
13786 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
13788 case NS_RRFF
: /* case 14 (fmrrs). */
13789 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
13790 _("VFP registers must be adjacent"));
13791 inst
.operands
[2].imm
= 2;
13792 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13793 do_vfp_nsyn_opcode ("fmrrs");
13796 case NS_FFRR
: /* case 15 (fmsrr). */
13797 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
13798 _("VFP registers must be adjacent"));
13799 inst
.operands
[1] = inst
.operands
[2];
13800 inst
.operands
[2] = inst
.operands
[3];
13801 inst
.operands
[0].imm
= 2;
13802 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
13803 do_vfp_nsyn_opcode ("fmsrr");
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
              _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
                  et.size - imm);
}
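/* As with the narrowing shifts, a #0 shift such as "vshr.s32 d0, d1, #0"
   is encoded as a register VMOV via do_neon_mov above.  */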
static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}

static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}
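/* Given the special case above, "vzip.32 d0, d1" and "vuzp.32 d0, d1" both
   assemble to VTRN.32 d0, d1, the architectural equivalent for 32-bit
   elements in D registers.  */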
static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
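/* For example, "vtbl.8 d0, {d2, d3}, d4" has a two-register list, so
   listlenbits is 1 and bits [9:8] of the encoding become 0b01.  */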
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
              _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
              _("register list must contain at least 1 and at most 16 "
                "registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
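/* The offset field counts words, so a list of four D registers (imm == 4)
   places 8 in the offset bits above, i.e. eight 32-bit words of transfer.  */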
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("flds");
      else
        do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
        do_vfp_nsyn_opcode ("fldd");
      else
        do_vfp_nsyn_opcode ("fstd");
    }
}
14006 /* "interleave" version also handles non-interleaving register VLD1/VST1
14010 do_neon_ld_st_interleave (void)
14012 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
14013 N_8
| N_16
| N_32
| N_64
);
14014 unsigned alignbits
= 0;
14016 /* The bits in this table go:
14017 0: register stride of one (0) or two (1)
14018 1,2: register list length, minus one (1, 2, 3, 4).
14019 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
14020 We use -1 for invalid entries. */
14021 const int typetable
[] =
14023 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
14024 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
14025 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14026 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14030 if (et
.type
== NT_invtype
)
14033 if (inst
.operands
[1].immisalign
)
14034 switch (inst
.operands
[1].imm
>> 8)
14036 case 64: alignbits
= 1; break;
14038 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
14039 goto bad_alignment
;
14043 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) == 3)
14044 goto bad_alignment
;
14049 first_error (_("bad alignment"));
14053 inst
.instruction
|= alignbits
<< 4;
14054 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14056 /* Bits [4:6] of the immediate in a list specifier encode register stride
14057 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14058 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14059 up the right value for "type" in a table based on this value and the given
14060 list style, then stick it back. */
14061 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
14062 | (((inst
.instruction
>> 8) & 3) << 3);
14064 typebits
= typetable
[idx
];
14066 constraint (typebits
== -1, _("bad list type for instruction"));
14068 inst
.instruction
&= ~0xf00;
14069 inst
.instruction
|= typebits
<< 8;
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise. The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
        break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
        result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
14114 do_neon_ld_st_lane (void)
14116 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
14117 int align_good
, do_align
= 0;
14118 int logsize
= neon_logbits (et
.size
);
14119 int align
= inst
.operands
[1].imm
>> 8;
14120 int n
= (inst
.instruction
>> 8) & 3;
14121 int max_el
= 64 / et
.size
;
14123 if (et
.type
== NT_invtype
)
14126 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
14127 _("bad list length"));
14128 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
14129 _("scalar index out of range"));
14130 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
14132 _("stride of 2 unavailable when element size is 8"));
14136 case 0: /* VLD1 / VST1. */
14137 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 16, 16,
14139 if (align_good
== FAIL
)
14143 unsigned alignbits
= 0;
14146 case 16: alignbits
= 0x1; break;
14147 case 32: alignbits
= 0x3; break;
14150 inst
.instruction
|= alignbits
<< 4;
14154 case 1: /* VLD2 / VST2. */
14155 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 16, 16, 32,
14157 if (align_good
== FAIL
)
14160 inst
.instruction
|= 1 << 4;
14163 case 2: /* VLD3 / VST3. */
14164 constraint (inst
.operands
[1].immisalign
,
14165 _("can't use alignment with this instruction"));
14168 case 3: /* VLD4 / VST4. */
14169 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
14170 16, 64, 32, 64, 32, 128, -1);
14171 if (align_good
== FAIL
)
14175 unsigned alignbits
= 0;
14178 case 8: alignbits
= 0x1; break;
14179 case 16: alignbits
= 0x1; break;
14180 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
14183 inst
.instruction
|= alignbits
<< 4;
14190 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
14191 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14192 inst
.instruction
|= 1 << (4 + logsize
);
14194 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
14195 inst
.instruction
|= logsize
<< 10;
14198 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14201 do_neon_ld_dup (void)
14203 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
14204 int align_good
, do_align
= 0;
14206 if (et
.type
== NT_invtype
)
14209 switch ((inst
.instruction
>> 8) & 3)
14211 case 0: /* VLD1. */
14212 assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
14213 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
14214 &do_align
, 16, 16, 32, 32, -1);
14215 if (align_good
== FAIL
)
14217 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
14220 case 2: inst
.instruction
|= 1 << 5; break;
14221 default: first_error (_("bad list length")); return;
14223 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14226 case 1: /* VLD2. */
14227 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
14228 &do_align
, 8, 16, 16, 32, 32, 64, -1);
14229 if (align_good
== FAIL
)
14231 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
14232 _("bad list length"));
14233 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14234 inst
.instruction
|= 1 << 5;
14235 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14238 case 2: /* VLD3. */
14239 constraint (inst
.operands
[1].immisalign
,
14240 _("can't use alignment with this instruction"));
14241 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
14242 _("bad list length"));
14243 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14244 inst
.instruction
|= 1 << 5;
14245 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14248 case 3: /* VLD4. */
14250 int align
= inst
.operands
[1].imm
>> 8;
14251 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
14252 16, 64, 32, 64, 32, 128, -1);
14253 if (align_good
== FAIL
)
14255 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
14256 _("bad list length"));
14257 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14258 inst
.instruction
|= 1 << 5;
14259 if (et
.size
== 32 && align
== 128)
14260 inst
.instruction
|= 0x3 << 6;
14262 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14269 inst
.instruction
|= do_align
<< 4;
14272 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
14273 apart from bits [11:4]. */
14276 do_neon_ldx_stx (void)
14278 switch (NEON_LANE (inst
.operands
[0].imm
))
14280 case NEON_INTERLEAVE_LANES
:
14281 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
14282 do_neon_ld_st_interleave ();
14285 case NEON_ALL_LANES
:
14286 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
14291 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
14292 do_neon_ld_st_lane ();
14295 /* L bit comes from bit mask. */
14296 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14297 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14298 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14300 if (inst
.operands
[1].postind
)
14302 int postreg
= inst
.operands
[1].imm
& 0xf;
14303 constraint (!inst
.operands
[1].immisreg
,
14304 _("post-index must be a register"));
14305 constraint (postreg
== 0xd || postreg
== 0xf,
14306 _("bad register for post-index"));
14307 inst
.instruction
|= postreg
;
14309 else if (inst
.operands
[1].writeback
)
14311 inst
.instruction
|= 0xd;
14314 inst
.instruction
|= 0xf;
14317 inst
.instruction
|= 0xf9000000;
14319 inst
.instruction
|= 0xf4000000;
14322 /* Overall per-instruction processing. */
14324 /* We need to be able to fix up arbitrary expressions in some statements.
14325 This is so that we can handle symbols that are an arbitrary distance from
14326 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14327 which returns part of an address in a form which will be valid for
14328 a data instruction. We do this by pushing the expression into a symbol
14329 in the expr_section, and creating a fix for that. */
14332 fix_new_arm (fragS
* frag
,
14347 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
14351 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
14356 /* Mark whether the fix is to a THUMB instruction, or an ARM
14358 new_fix
->tc_fix_data
= thumb_mode
;
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
                 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
14395 put_thumb32_insn (char * buf
, unsigned long insn
)
14397 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
14398 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
14402 output_inst (const char * str
)
14408 as_bad ("%s -- `%s'", inst
.error
, str
);
14413 output_relax_insn ();
14416 if (inst
.size
== 0)
14419 to
= frag_more (inst
.size
);
14420 /* PR 9814: Record the thumb mode into the current frag so that we know
14421 what type of NOP padding to use, if necessary. We override any previous
14422 setting so that if the mode has changed then the NOPS that we use will
14423 match the encoding of the last instruction in the frag. */
14424 frag_now
->tc_frag_data
= thumb_mode
| MODE_RECORDED
;
14426 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
14428 assert (inst
.size
== (2 * THUMB_SIZE
));
14429 put_thumb32_insn (to
, inst
.instruction
);
14431 else if (inst
.size
> INSN_SIZE
)
14433 assert (inst
.size
== (2 * INSN_SIZE
));
14434 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
14435 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
14438 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
14440 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
14441 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
14442 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
14445 dwarf2_emit_insn (inst
.size
);
/* Tag values used in struct asm_opcode's tag field.  */
  OT_unconditional,      /* Instruction cannot be conditionalized.
                            The ARM condition field is still 0xE.  */
  OT_unconditionalF,     /* Instruction cannot be conditionalized
                            and carries 0xF in its ARM condition field.  */
  OT_csuffix,            /* Instruction takes a conditional suffix.  */
  OT_csuffixF,           /* Some forms of the instruction take a conditional
                            suffix, others place 0xF where the condition field
                            would be.  */
  OT_cinfix3,            /* Instruction takes a conditional infix,
                            beginning at character index 3.  (In
                            unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
                            tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,     /* Legacy instruction takes a conditional infix at
                            character index 3, even in unified mode.  Used for
                            legacy instructions where suffix and infix forms
                            may be ambiguous.  */
  OT_csuf_or_in3,        /* Instruction takes either a conditional
                            suffix or an infix at character index 3.  */
  OT_odd_infix_unc,      /* This is the unconditional variant of an
                            instruction that takes a conditional infix
                            at an unusual position.  In unified mode,
                            this variant will accept a suffix.  */
  OT_odd_infix_0         /* Values greater than or equal to OT_odd_infix_0
                            are the conditional variants of instructions that
                            take conditional infixes in unusual positions.
                            The infix appears at character index
                            (tag - OT_odd_infix_0).  These are not accepted
                            in unified mode.  */
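/* To ground these tags in the opcode table below: tCE(and, ...) uses
   OT_csuffix ("and"/"andeq"), tC3(ands, ...) uses OT_cinfix3 ("andeqs" in
   divided syntax), tC3w(tsts, ...) uses OT_cinfix3_deprecated,
   CL(tstp, ...) uses OT_cinfix3_legacy, TUF(pld, ...) uses OT_unconditionalF,
   and the CM(smull,s, ...) expansion produces the OT_odd_infix_unc and
   OT_odd_infix_0 variants.  (Illustrative summary; the table itself is
   authoritative.)  */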
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */
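/* Worked examples (illustrative, following the steps above): "addeq" misses
   in step 1; step 2 strips the suffix "eq" and finds "add", so we land in
   step CE with inst.cond set to the value of "eq".  Divided-syntax "andeqs"
   also misses in step 1; step 3 pulls "eq" out of character positions 4-5,
   finds "ands", and lands in step CM.  A variant such as "smulleqs" is a
   literal table entry (see the CM macro below), so step 1 hits directly and
   step U recovers "eq" from the OT_odd_infix tag.  */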
static const struct asm_opcode *
opcode_lookup (char **str)
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  bfd_boolean neon_supported;

  neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))

  /* Handle a possible width suffix and/or Neon type suffix.  */
      /* The .w and .n suffixes are only valid if the unified syntax is in
         use.  */
      if (unified_syntax && end[1] == 'w')
      else if (unified_syntax && end[1] == 'n')
    inst.vectype.elems = 0;

  *str = end + offset;

  if (end[offset] == '.')
      /* See if we have a Neon type suffix (possible in either unified or
         non-unified ARM syntax mode).  */
      if (parse_neon_type (&inst.vectype, str) == FAIL)
  else if (end[offset] != '\0' && end[offset] != ' ')
  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = hash_find_n (arm_ops_hsh, base, end - base);
      if (opcode->tag < OT_odd_infix_0)
          inst.cond = COND_ALWAYS;
      if (warn_on_deprecated && unified_syntax)
        as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = hash_find_n (arm_cond_hsh, affix, 2);
      inst.cond = cond->value;
  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)

  /* Look for suffixed mnemonic.  */
  cond = hash_find_n (arm_cond_hsh, affix, 2);
  opcode = hash_find_n (arm_ops_hsh, base, affix - base);
  if (opcode && cond)
      switch (opcode->tag)
        case OT_cinfix3_legacy:
          /* Ignore conditional suffixes matched on infix only mnemonics.  */

        case OT_cinfix3_deprecated:
        case OT_odd_infix_unc:
          if (!unified_syntax)
          /* else fall through */

        case OT_csuf_or_in3:
          inst.cond = cond->value;

        case OT_unconditional:
        case OT_unconditionalF:
          inst.cond = cond->value;

          /* delayed diagnostic */
          inst.error = BAD_COND;
          inst.cond = COND_ALWAYS;
  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)

  /* Look for infixed mnemonic in the usual position.  */
  cond = hash_find_n (arm_cond_hsh, affix, 2);

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

      && (opcode->tag == OT_cinfix3
          || opcode->tag == OT_cinfix3_deprecated
          || opcode->tag == OT_csuf_or_in3
          || opcode->tag == OT_cinfix3_legacy))
      if (warn_on_deprecated && unified_syntax
          && (opcode->tag == OT_cinfix3
              || opcode->tag == OT_cinfix3_deprecated))
        as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
md_assemble (char *str)
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
      /* It wasn't an instruction, but it might be a register alias of
         the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (!create_register_alias (str, p)
          && !create_neon_reg_alias (str, p))
        as_bad (_("bad instruction `%s'"), str);

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
        ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
          || (thumb_mode == 1
              && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
          as_bad (_("selected processor does not support `%s'"), str);
      if (inst.cond != COND_ALWAYS && !unified_syntax
          && opcode->tencode != do_t_branch)
          as_bad (_("Thumb does not support conditional execution"));

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2) && !inst.size_req)
          /* Implicitly require narrow instructions on Thumb-1.  This avoids
             relaxation accidentally introducing Thumb-2 instructions.  */
          if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
              && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
                   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))

      /* Check conditional suffixes.  */
      if (current_it_mask)
          cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
          current_it_mask <<= 1;
          current_it_mask &= 0x1f;
          /* The BKPT instruction is unconditional even in an IT block.  */
              && cond != inst.cond && opcode->tencode != do_t_bkpt)
              as_bad (_("incorrect condition in IT block"));
      else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
          as_bad (_("thumb conditional instruction not in IT block"));

      mapping_state (MAP_THUMB);
      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands))
        opcode->tencode ();

      /* Clear current_it_mask at the end of an IT block.  */
      if (current_it_mask == 0x10)
        current_it_mask = 0;

      if (!(inst.error || inst.relax))
          assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
          inst.size = (inst.instruction > 0xffff ? 4 : 2);
          if (inst.size_req && inst.size_req != inst.size)
              as_bad (_("cannot honor width suffix -- `%s'"), str);

      /* Something has gone badly wrong if we try to relax a fixed size
         instruction.  */
      assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
                              *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
         set those bits when Thumb-2 32-bit instructions are seen.  I.e.
         anything other than bl/blx and v6-M instructions.
         This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
           && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
                || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
        ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
          && !(opcode->avariant &&
               ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
          as_bad (_("selected processor does not support `%s'"), str);
          as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);

      mapping_state (MAP_ARM);
      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
        inst.instruction |= 0xF << 28;
        inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands))
        opcode->aencode ();
      /* Arm mode bx is marked as both v4T and v5 because it's still required
         on a hypothetical non-thumb v5 core.  */
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
        ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
                                *opcode->avariant);
    as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
/* Various frobbings of labels and their addresses.  */
arm_start_line_hook (void)
  last_label_seen = NULL;

arm_frob_label (symbolS * sym)
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  E.g. gcc
     can generate code that looks like this:

                ldr  r2, [pc, .Laaa]

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
      /* When the address of a Thumb function is taken the bottom
         bit of that address should be set.  This will allow
         interworking between Arm and Thumb functions to work
         correctly.  */
      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;

  dwarf2_emit_label (sym);
arm_data_in_code (void)
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;

arm_canonicalize_symbol_name (char * name)
  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
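/* For instance, REGSET(r, RN) expands (via REGNUM and REGDEF) to the sixteen
   entries { "r0", 0, REG_TYPE_RN, TRUE, 0 } ... { "r15", 15, REG_TYPE_RN,
   TRUE, 0 }, while REGSET2(q, NQ) maps "q0".."q15" onto the even numbers
   0, 2, ..., 30 because REGNUM2 doubles the index.  */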
static const struct reg_entry reg_names[] =
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c, CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS), REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD), REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ), REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
  REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
  REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
  REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all", PSR_c | PSR_f},

  /* Individual flags.  */
  /* Combinations of flags.  */
  {"fs", PSR_f | PSR_s},
  {"fx", PSR_f | PSR_x},
  {"fc", PSR_f | PSR_c},
  {"sf", PSR_s | PSR_f},
  {"sx", PSR_s | PSR_x},
  {"sc", PSR_s | PSR_c},
  {"xf", PSR_x | PSR_f},
  {"xs", PSR_x | PSR_s},
  {"xc", PSR_x | PSR_c},
  {"cf", PSR_c | PSR_f},
  {"cs", PSR_c | PSR_s},
  {"cx", PSR_c | PSR_x},
  {"fsx", PSR_f | PSR_s | PSR_x},
  {"fsc", PSR_f | PSR_s | PSR_c},
  {"fxs", PSR_f | PSR_x | PSR_s},
  {"fxc", PSR_f | PSR_x | PSR_c},
  {"fcs", PSR_f | PSR_c | PSR_s},
  {"fcx", PSR_f | PSR_c | PSR_x},
  {"sfx", PSR_s | PSR_f | PSR_x},
  {"sfc", PSR_s | PSR_f | PSR_c},
  {"sxf", PSR_s | PSR_x | PSR_f},
  {"sxc", PSR_s | PSR_x | PSR_c},
  {"scf", PSR_s | PSR_c | PSR_f},
  {"scx", PSR_s | PSR_c | PSR_x},
  {"xfs", PSR_x | PSR_f | PSR_s},
  {"xfc", PSR_x | PSR_f | PSR_c},
  {"xsf", PSR_x | PSR_s | PSR_f},
  {"xsc", PSR_x | PSR_s | PSR_c},
  {"xcf", PSR_x | PSR_c | PSR_f},
  {"xcs", PSR_x | PSR_c | PSR_s},
  {"cfs", PSR_c | PSR_f | PSR_s},
  {"cfx", PSR_c | PSR_f | PSR_x},
  {"csf", PSR_c | PSR_s | PSR_f},
  {"csx", PSR_c | PSR_s | PSR_x},
  {"cxf", PSR_c | PSR_x | PSR_f},
  {"cxs", PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
15152 static const struct asm_psr v7m_psrs
[] =
15154 {"apsr", 0 }, {"APSR", 0 },
15155 {"iapsr", 1 }, {"IAPSR", 1 },
15156 {"eapsr", 2 }, {"EAPSR", 2 },
15157 {"psr", 3 }, {"PSR", 3 },
15158 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
15159 {"ipsr", 5 }, {"IPSR", 5 },
15160 {"epsr", 6 }, {"EPSR", 6 },
15161 {"iepsr", 7 }, {"IEPSR", 7 },
15162 {"msp", 8 }, {"MSP", 8 },
15163 {"psp", 9 }, {"PSP", 9 },
15164 {"primask", 16}, {"PRIMASK", 16},
15165 {"basepri", 17}, {"BASEPRI", 17},
15166 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
15167 {"faultmask", 19}, {"FAULTMASK", 19},
15168 {"control", 20}, {"CONTROL", 20}
/* Table of all shift-in-operand names.  */
static const struct asm_shift_name shift_names[] =
  { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
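/* These names appear as the shift part of a shifted operand, e.g.
   "mov r0, r1, lsl #3" or "add r2, r3, r4, ror r5"; as the table shows,
   "asl" is accepted as a synonym for "lsl".  */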
/* Table of all explicit relocation names.  */
static struct reloc_entry reloc_names[] =
  { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
  { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
  { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
  { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
  { "tlsgd", BFD_RELOC_ARM_TLS_GD32 }, { "TLSGD", BFD_RELOC_ARM_TLS_GD32 },
  { "tlsldm", BFD_RELOC_ARM_TLS_LDM32 }, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32 },
  { "tlsldo", BFD_RELOC_ARM_TLS_LDO32 }, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32 },
  { "gottpoff", BFD_RELOC_ARM_TLS_IE32 }, { "GOTTPOFF", BFD_RELOC_ARM_TLS_IE32 },
  { "tpoff", BFD_RELOC_ARM_TLS_LE32 }, { "TPOFF", BFD_RELOC_ARM_TLS_LE32 }
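/* These are the explicit relocation specifiers written in parentheses after a
   symbol in an operand; e.g. "bl foo(PLT)" requests BFD_RELOC_ARM_PLT32 and
   ".word bar(GOTOFF)" requests BFD_RELOC_ARM_GOTOFF.  (Illustrative examples;
   the operand parser defines the exact syntax.)  */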
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
static const struct asm_cond conds[] =
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},

static struct asm_barrier_opt barrier_opt_names[] =
/* Table of ARM-format instructions.  */

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()            { OP_stop, }
#define OPS1(a)           { OP_##a, }
#define OPS2(a,b)         { OP_##a,OP_##b, }
#define OPS3(a,b,c)       { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)     { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)   { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
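/* For example, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, }, and
   the unspecified trailing elements of the operands array default to
   OP_stop (zero).  */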
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
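/* As a concrete illustration, TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc),
   rd_rm, t_clz) becomes
     { "clz", { OP_RRnpc, OP_RRnpc, }, OT_csuffix, 0x16f0f10, 0xfab0f080,
       ARM_VARIANT, THUMB_VARIANT, do_rd_rm, do_t_clz }
   with ARM_VARIANT and THUMB_VARIANT taking whatever values are #defined at
   that point in the table.  */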
/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
/* Mnemonic with a conditional infix in an unusual place.  Each and every
   variant has to appear in the opcode table.  */
#define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
  { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }

#define TxCM(m1, m2, op, top, nops, ops, ae, te) \
  TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
  TxCM_(m1, al, m2, op, top, nops, ops, ae, te)

#define TCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
#define tCM(m1,m2, aop, top, nops, ops, ae, te) \
  TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
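/* For example, the single expansion TxCM_(smull, eq, s, ...) yields a table
   entry whose mnemonic string is "smulleqs" and whose tag is
   OT_odd_infix_0 + 5 (the infix "eq" starts at character index 5), while
   TxCM_(smull, , s, ...) yields "smulls" with tag OT_odd_infix_unc because
   sizeof("") == 1.  */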
/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te }
/* ARM-only variants of all the above.  */
#define CE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

#define C3(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }

#define xCM_(m1, m2, m3, op, nops, ops, ae) \
  { #m1 #m2 #m3, OPS##nops ops, \
    sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
#define CM(m1, m2, op, nops, ops, ae) \
  xCM_(m1, , m2, op, nops, ops, ae), \
  xCM_(m1, eq, m2, op, nops, ops, ae), \
  xCM_(m1, ne, m2, op, nops, ops, ae), \
  xCM_(m1, cs, m2, op, nops, ops, ae), \
  xCM_(m1, hs, m2, op, nops, ops, ae), \
  xCM_(m1, cc, m2, op, nops, ops, ae), \
  xCM_(m1, ul, m2, op, nops, ops, ae), \
  xCM_(m1, lo, m2, op, nops, ops, ae), \
  xCM_(m1, mi, m2, op, nops, ops, ae), \
  xCM_(m1, pl, m2, op, nops, ops, ae), \
  xCM_(m1, vs, m2, op, nops, ops, ae), \
  xCM_(m1, vc, m2, op, nops, ops, ae), \
  xCM_(m1, hi, m2, op, nops, ops, ae), \
  xCM_(m1, ls, m2, op, nops, ops, ae), \
  xCM_(m1, ge, m2, op, nops, ops, ae), \
  xCM_(m1, lt, m2, op, nops, ops, ae), \
  xCM_(m1, gt, m2, op, nops, ops, ae), \
  xCM_(m1, le, m2, op, nops, ops, ae), \
  xCM_(m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

#define UF(mnem, op, nops, ops, ae) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc) \
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
    THUMB_VARIANT, do_##enc, do_##enc }

#define NCE(mnem, op, nops, ops, enc) \
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define NCEF(mnem, op, nops, ops, enc) \
  NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag) \
  { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }

#define nCE(mnem, op, nops, ops, enc) \
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)

#define nCEF(mnem, op, nops, ops, enc) \
  nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)

/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
static const struct asm_opcode insns[] =
#define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions.  */
#define THUMB_VARIANT &arm_ext_v4t
 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
    for setting PSR flag bits.  They are obsolete in V6 and do not
    have Thumb equivalents.  */
 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
 CL(tstp, 110f000, 2, (RR, SH), cmp),
 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
 CL(cmpp, 150f000, 2, (RR, SH), cmp),
 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
 CL(cmnp, 170f000, 2, (RR, SH), cmp),

 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),

 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR), ldst, t_ldst),
 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR), ldst, t_ldst),
 tCE(str, 4000000, str, 2, (RR, ADDRGLDR), ldst, t_ldst),
 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR), ldst, t_ldst),

 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),

 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),

 /* Thumb-compatibility pseudo ops.  */
 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),

 /* These may simplify to neg.  */
 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6
 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),

 /* V1 instructions with no Thumb analogue prior to V6T2.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 CL(teqp, 130f000, 2, (RR, SH), cmp),

 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),

 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 /* V1 instructions with no Thumb analogue at all.  */
 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),

 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
#define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v4t
 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),

 /* Generic coprocessor instructions.  */
 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),

#define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions.  */
 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),

#define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_msr
 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),

#define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
#define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v4t
 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),

#define ARM_VARIANT &arm_ext_v4t_5
 /* ARM Architecture 4T.  */
 /* Note: bx (and blx) are required on V5, even if the processor does
    not support Thumb.  */
 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),

#define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v5t
 /* Note: blx has 2 variants; the .value coded here is for
    BLX(2).  Only this variant has conditional execution.  */
 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
#define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP.  */
 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),

 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),

#define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE.  */
 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),

 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

#define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ.  */
 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
#define ARM_VARIANT &arm_ext_v6 /* ARM V6.  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6
 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),

#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar), ssat, t_ssat),
 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar), usat, t_usat),
 /* ARM V6 not included in V7M (eg. integer SIMD).  */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6_notm
 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qasx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QASX.  */
 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsax, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QSAX.  */
 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sasx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SASX.  */
 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shasx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHASX.  */
 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsax, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHSAX.  */
 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssax, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SSAX.  */
 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uasx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UASX.  */
 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhasx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHASX.  */
 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsax, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHSAX.  */
 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqasx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQASX.  */
 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsax, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQSAX.  */
 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usax, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for USAX.  */
 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfeib, 9900a00, 1, (RRw), rfe),
 UF(rfeda, 8100a00, 1, (RRw), rfe),
 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfefa, 9900a00, 1, (RRw), rfe),
 UF(rfeea, 8100a00, 1, (RRw), rfe),
 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
15797 #define ARM_VARIANT &arm_ext_v6k
15798 #undef THUMB_VARIANT
15799 #define THUMB_VARIANT &arm_ext_v6k
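 /* ARMv6K hint instructions; they occupy the NOP hint space and behave as
    NOPs on cores that do not implement the corresponding feature. */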
 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6_notm
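 /* Doubleword load/store exclusive. */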
 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v6t2
 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
#define ARM_VARIANT &arm_ext_v6z
 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
#define ARM_VARIANT &arm_ext_v6t2
 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
  UT(cbnz, b900, 2, (RR, EXP), t_cbz),
  UT(cbz, b100, 2, (RR, EXP), t_cbz),
 /* ARM does not really have an IT instruction, so always allow it. */
#define ARM_VARIANT &arm_ext_v1
 TUE(it, 0, bf08, 1, (COND), it, t_it),
 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
 TUE(ite, 0, bf04, 1, (COND), it, t_it),
 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
 TUE(itet, 0, bf06, 1, (COND), it, t_it),
 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
 TUE(itee, 0, bf02, 1, (COND), it, t_it),
 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
 TUE(itett, 0, bf07, 1, (COND), it, t_it),
 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
 TUE(itete, 0, bf05, 1, (COND), it, t_it),
 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
 TC3(rrx, 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
 TC3(rrxs, 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
 /* Thumb2 only instructions. */
#define ARM_VARIANT NULL
 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(orn, 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
 TCE(orns, 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
 /* Thumb-2 hardware division instructions (R and M profiles only). */
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_div
 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
 /* ARM V6M/V7 instructions. */
#define ARM_VARIANT &arm_ext_barrier
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_barrier
 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
 /* ARM V7 instructions. */
#define ARM_VARIANT &arm_ext_v7
#undef THUMB_VARIANT
#define THUMB_VARIANT &arm_ext_v7
 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
#define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
 cCE(wfs, e200110, 1, (RR), rd),
 cCE(rfs, e300110, 1, (RR), rd),
 cCE(wfc, e400110, 1, (RR), rd),
 cCE(rfc, e500110, 1, (RR), rd),
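 /* FPA loads and stores.  The final letter selects the precision:
    S (single), D (double), E (extended) or P (packed decimal). */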
 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
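 /* FPA monadic operations.  Each mnemonic carries a precision letter
    (S/D/E) and an optional rounding letter (P/M/Z). */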
 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
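 /* FPA dyadic operations, using the same precision/rounding suffix scheme. */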
 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
 cCL(flts, e000110, 2, (RF, RR), rn_rd),
 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
 cCL(flte, e080110, 2, (RF, RR), rn_rd),
 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits. */
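 /* For example, "fixsp" and "fixdp" below carry a precision letter but
    assemble to the same encoding as "fixp". */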
 cCE(fix, e100110, 2, (RR, RF), rd_rm),
 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
 /* Instructions that were new with the real FPA, call them V2. */
#define ARM_VARIANT &fpu_fpa_ext_v2
 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
#define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
 /* Moves and type conversions. */
 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
 cCE(fmstat, ef1fa10, 0, (), noargs),
 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
 /* Memory operations. */
 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 /* Monadic operations. */
 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
 /* Dyadic operations. */
 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
#define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
 /* Moves and type conversions. */
 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 /* Memory operations. */
 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 /* Monadic operations. */
 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 /* Dyadic operations. */
 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
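 /* VFPv2 adds two-register transfers between core registers and either a
    pair of single-precision registers or one double-precision register. */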
#define ARM_VARIANT &fpu_vfp_ext_v2
 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks. */
#define ARM_VARIANT &fpu_vfp_ext_v1xd
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v1xd
 /* These mnemonics are unique to VFP. */
 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
 /* Mnemonics shared by Neon and VFP. */
 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),
 /* NOTE: All VMOV encoding is special-cased! */
 NCE(vmov, 0, 1, (VMOV), neon_mov),
 NCE(vmovq, 0, 1, (VMOV), neon_mov),
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_neon_ext_v1
#define ARM_VARIANT &fpu_neon_ext_v1
 /* Data processing with three registers of the same length. */
 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
 /* If not immediate, fall back to neon_dyadic_i64_su.
    shl_imm should accept I8 I16 I32 I64,
    qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
 /* Logic ops, types optional & ignored. */
 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
 /* Bitfield ops, untyped. */
 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
    back to neon_dyadic_if_su. */
 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
 /* Comparison. Type I8 I16 I32 F32. */
 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
 /* As above, D registers only. */
 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
 /* Int and float variants, signedness unimportant. */
 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
 /* Add/sub take types I8 I16 I32 I64 F32. */
 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
 /* vtst takes sizes 8, 16, 32. */
 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
 /* VMUL takes I8 I16 I32 F32 P8. */
 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
 /* VQD{R}MULH takes S16 S32. */
 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
 /* Two address, int/float. Types S8 S16 S32 F32. */
 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
 /* Data processing with two registers and a shift amount. */
 /* Right shifts, and variants with rounding.
    Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 /* Shift and insert. Sizes accepted 8 16 32 64. */
 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
 /* Right shift immediate, saturating & narrowing, with rounding variants.
    Types accepted S16 S32 S64 U16 U32 U64. */
 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 /* As above, unsigned. Types accepted S16 S32 S64. */
 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 /* Right shift narrowing. Types accepted I16 I32 I64. */
 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
 /* CVT with optional immediate for fixed-point variant. */
 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
 /* Data processing, three registers of different lengths. */
 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
 /* If not scalar, fall back to neon_dyadic_long.
    Vector types as above, scalar types S16 S32 U16 U32. */
 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 /* Dyadic, narrowing insns. Types I16 I32 I64. */
 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 /* Saturating doubling multiplies. Types S16 S32. */
 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
    S16 S32 U16 U32. */
 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
 /* Extract. Size 8. */
 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
 /* Two registers, miscellaneous. */
 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
 /* Vector replicate. Sizes 8 16 32. */
 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
 /* VMOVN. Types I16 I32 I64. */
 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
 /* VQMOVUN. Types S16 S32 S64. */
 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
 /* VZIP / VUZP. Sizes 8 16 32. */
 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
 /* VQABS / VQNEG. Types S8 S16 S32. */
 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
 /* Reciprocal estimates. Types U32 F32. */
 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
 /* VCLS. Types S8 S16 S32. */
 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
 /* VCLZ. Types I8 I16 I32. */
 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
 /* VCNT. Size 8. */
 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
 /* Two address, untyped. */
 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
 /* VTRN. Sizes 8 16 32. */
 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
 /* Table lookup. Size 8. */
 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
#undef THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
#define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
 /* Neon element/structure load/store. */
 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
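 /* VFPv3 constant loads and fixed-point conversions. */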
16746 #undef THUMB_VARIANT
16747 #define THUMB_VARIANT &fpu_vfp_ext_v3
16749 #define ARM_VARIANT &fpu_vfp_ext_v3
16750 cCE(fconsts
, eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
16751 cCE(fconstd
, eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
16752 cCE(fshtos
, eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
16753 cCE(fshtod
, eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
16754 cCE(fsltos
, eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
16755 cCE(fsltod
, eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
16756 cCE(fuhtos
, ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
16757 cCE(fuhtod
, ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
16758 cCE(fultos
, ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
16759 cCE(fultod
, ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
16760 cCE(ftoshs
, ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
16761 cCE(ftoshd
, ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
16762 cCE(ftosls
, ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
16763 cCE(ftosld
, ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
16764 cCE(ftouhs
, ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
16765 cCE(ftouhd
, ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
16766 cCE(ftouls
, ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
16767 cCE(ftould
, ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
16769 #undef THUMB_VARIANT
16771 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
16772 cCE(mia
, e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
16773 cCE(miaph
, e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
16774 cCE(miabb
, e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
16775 cCE(miabt
, e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
16776 cCE(miatb
, e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
16777 cCE(miatt
, e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
16778 cCE(mar
, c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
16779 cCE(mra
, c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
#define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckehub, e0000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuh, e4000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuw, e8000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsb, e2000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsh, e6000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsw, ea000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckelub, e0000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluh, e4000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluw, e8000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsb, e2000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsh, e6000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsw, ea000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
#define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
#define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions.  */
 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
 cCE(cftruncs32, e1005c0, 2, (RMFX, RMF), rd_rn),
 cCE(cftruncd32, e1005e0, 2, (RMFX, RMD), rd_rn),
 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
#undef THUMB_VARIANT
/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, storing them in the array pointed to by buf.
   This knows about the endianness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating point numbers are put out as a series
   of LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}

static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    {
      while (n--)
        {
          result <<= 8;
          result |= (*where++ & 255);
        }
    }
  else
    {
      while (n--)
        {
          result <<= 8;
          result |= (where[n] & 255);
        }
    }

  return result;
}
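/* Illustrative example only (not part of the assembler proper): on a
   little-endian target, md_number_to_chars (buf, 0xe1a00000, 4) stores the
   bytes 00 00 a0 e1, while a big-endian target gets e1 a0 00 00; in both
   cases md_chars_to_number (buf, 4) reads back 0xe1a00000.  */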
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
                               segT    segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 2;
  return 2;
}
17170 /* Convert a machine dependent frag. */
17173 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
17175 unsigned long insn
;
17176 unsigned long old_op
;
17184 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
17186 old_op
= bfd_get_16(abfd
, buf
);
17187 if (fragp
->fr_symbol
)
17189 exp
.X_op
= O_symbol
;
17190 exp
.X_add_symbol
= fragp
->fr_symbol
;
17194 exp
.X_op
= O_constant
;
17196 exp
.X_add_number
= fragp
->fr_offset
;
17197 opcode
= fragp
->fr_subtype
;
17200 case T_MNEM_ldr_pc
:
17201 case T_MNEM_ldr_pc2
:
17202 case T_MNEM_ldr_sp
:
17203 case T_MNEM_str_sp
:
17210 if (fragp
->fr_var
== 4)
17212 insn
= THUMB_OP32 (opcode
);
17213 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
17215 insn
|= (old_op
& 0x700) << 4;
17219 insn
|= (old_op
& 7) << 12;
17220 insn
|= (old_op
& 0x38) << 13;
17222 insn
|= 0x00000c00;
17223 put_thumb32_insn (buf
, insn
);
17224 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
17228 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
17230 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
17233 if (fragp
->fr_var
== 4)
17235 insn
= THUMB_OP32 (opcode
);
17236 insn
|= (old_op
& 0xf0) << 4;
17237 put_thumb32_insn (buf
, insn
);
17238 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
17242 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
17243 exp
.X_add_number
-= 4;
17251 if (fragp
->fr_var
== 4)
17253 int r0off
= (opcode
== T_MNEM_mov
17254 || opcode
== T_MNEM_movs
) ? 0 : 8;
17255 insn
= THUMB_OP32 (opcode
);
17256 insn
= (insn
& 0xe1ffffff) | 0x10000000;
17257 insn
|= (old_op
& 0x700) << r0off
;
17258 put_thumb32_insn (buf
, insn
);
17259 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
17263 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
17268 if (fragp
->fr_var
== 4)
17270 insn
= THUMB_OP32(opcode
);
17271 put_thumb32_insn (buf
, insn
);
17272 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
17275 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
17279 if (fragp
->fr_var
== 4)
17281 insn
= THUMB_OP32(opcode
);
17282 insn
|= (old_op
& 0xf00) << 14;
17283 put_thumb32_insn (buf
, insn
);
17284 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
17287 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
17290 case T_MNEM_add_sp
:
17291 case T_MNEM_add_pc
:
17292 case T_MNEM_inc_sp
:
17293 case T_MNEM_dec_sp
:
17294 if (fragp
->fr_var
== 4)
17296 /* ??? Choose between add and addw. */
17297 insn
= THUMB_OP32 (opcode
);
17298 insn
|= (old_op
& 0xf0) << 4;
17299 put_thumb32_insn (buf
, insn
);
17300 if (opcode
== T_MNEM_add_pc
)
17301 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
17303 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
17306 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
17314 if (fragp
->fr_var
== 4)
17316 insn
= THUMB_OP32 (opcode
);
17317 insn
|= (old_op
& 0xf0) << 4;
17318 insn
|= (old_op
& 0xf) << 16;
17319 put_thumb32_insn (buf
, insn
);
17320 if (insn
& (1 << 20))
17321 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
17323 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
17326 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
17332 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
17334 fixp
->fx_file
= fragp
->fr_file
;
17335 fixp
->fx_line
= fragp
->fr_line
;
17336 fragp
->fr_fix
+= fragp
->fr_var
;
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */
static int
relax_immediate (fragS *fragp, int size, int shift)
{
  offsetT offset;
  offsetT mask;
  offsetT low;

  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)
    return 4;

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & low)
    return 4;
  if (offset & ~mask)
    return 4;
  return 2;
}
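/* Illustrative example: arm_relax_frag below calls this with SIZE == 8 and
   SHIFT == 2 for the SP/PC-relative loads and stores, so offsets that are
   multiples of 4 in the range 0..1020 keep the 2-byte encoding and anything
   else forces the 4-byte (32-bit) variant.  */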
/* Get the address of a symbol during relaxation.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
        || sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if we have
         been expanding the earlier code, the symbol may be
         defined in what appears to be an earlier frag.  FIXME:
         This doesn't handle the fr_subtype field, which specifies
         a maximum number of bytes to skip when doing an
         alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
        {
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
            {
              if (stretch < 0)
                stretch = - ((- stretch)
                             & ~ ((1 << (int) f->fr_offset) - 1));
              else
                stretch &= ~ ((1 << (int) f->fr_offset) - 1);
              if (stretch == 0)
                break;
            }
        }
      if (f != NULL)
        addr += stretch;
    }

  return addr;
}
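/* Illustrative example: if the frags before an intervening ".align 2" frag
   have grown by 3 bytes, the alignment absorbs that growth
   (3 & ~((1 << 2) - 1) == 0), so a symbol beyond it is assumed not to have
   moved.  */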
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;

  val -= addr;
  if (val < 0 || val > 1020)
    return 4;

  return 2;
}
/* Return the size of a relaxable add/sub immediate instruction.  */
static int
relax_addsub (fragS *fragp, asection *sec)
{
  char *buf;
  int op;

  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16 (sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  else
    return relax_immediate (fragp, 3, 0);
}
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2.  */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;

  return 2;
}
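/* Illustrative example: for the narrow unconditional branch (BITS == 11)
   the stored offset is a halfword count, so a target within roughly
   +/- 2 KB of the branch keeps the 2-byte encoding and anything further
   away is relaxed to the 32-bit form.  */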
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
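/* Illustrative note: the value returned above is the size delta reported
   back to the generic relaxation pass; e.g. a conditional branch that no
   longer reaches its target grows from fr_var == 2 to 4 and the function
   returns +2.  */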
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT   segment ATTRIBUTE_UNUSED,
                  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
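/* Illustrative example: with a section alignment of 2 (4-byte alignment),
   a size of 10 rounds up to 12 under the expression above.  */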
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

void
arm_handle_align (fragS * fragP)
{
  static char const arm_noop[2][2][4] =
    {
      {
        {0x00, 0x00, 0xa0, 0xe1},  /* LE */
        {0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {
        {0x00, 0xf0, 0x20, 0xe3},  /* LE */
        {0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static char const thumb_noop[2][2][2] =
    {
      {
        {0xc0, 0x46},  /* LE */
        {0x46, 0xc0},  /* BE */
      },
      {
        {0x00, 0xbf},  /* LE */
        {0xbf, 0x00}   /* BE */
      }
    };
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const char * noop;
  const char *narrow_noop = NULL;

  if (fragP->fr_type != rs_align_code)
    return;

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  assert ((fragP->tc_frag_data & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data & (~ MODE_RECORDED))
    {
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
        {
          narrow_noop = thumb_noop[1][target_big_endian];
          noop = wide_thumb_noop[target_big_endian];
        }
      else
        noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
    }
  else
    {
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
                     [target_big_endian];
      noop_size = 4;
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
        {
          /* Insert a narrow noop.  */
          memcpy (p, narrow_noop, noop_size);
          p += noop_size;
          bytes -= noop_size;
          fix += noop_size;
        }

      /* Use wide noops for the remainder.  */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */

void
arm_frag_align_code (int n, int max)
{
  char * p;

  /* We assume that there will never be a requirement
     to support alignments greater than 32 bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
    as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));

  p = frag_var (rs_align_code,
                MAX_MEM_FOR_RS_ALIGN_CODE,
                1,
                (relax_substateT) max,
                (symbolS *) NULL,
                (offsetT) n,
                (char *) NULL);
  *p = 0;
}
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

void
arm_init_frag (fragS * fragP)
{
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data & MODE_RECORDED) == 0)
    fragP->tc_frag_data = thumb_mode | MODE_RECORDED;
}
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  flagword flags;
  segment_info_type *seginfo;

  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

  if (!SEG_NORMAL (now_seg))
    return;

  flags = bfd_get_section_flags (stdoutput, now_seg);

  /* We can ignore sections that only contain debug info.  */
  if ((flags & SEC_ALLOC) == 0)
    return;

  seginfo = seg_info (now_seg);
  mapstate = seginfo->tc_segment_info_data.mapstate;
  marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
}

int
arm_elf_section_type (const char * str, size_t len)
{
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;

  return -1;
}
17764 /* Code to deal with unwinding tables. */
17766 static void add_unwind_adjustsp (offsetT
);
17768 /* Generate any deferred unwind frame offset. */
17771 flush_pending_unwind (void)
17775 offset
= unwind
.pending_offset
;
17776 unwind
.pending_offset
= 0;
17778 add_unwind_adjustsp (offset
);
17781 /* Add an opcode to this list for this function. Two-byte opcodes should
17782 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
17786 add_unwind_opcode (valueT op
, int length
)
17788 /* Add any deferred stack adjustment. */
17789 if (unwind
.pending_offset
)
17790 flush_pending_unwind ();
17792 unwind
.sp_restored
= 0;
17794 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
17796 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
17797 if (unwind
.opcodes
)
17798 unwind
.opcodes
= xrealloc (unwind
.opcodes
,
17799 unwind
.opcode_alloc
);
17801 unwind
.opcodes
= xmalloc (unwind
.opcode_alloc
);
17806 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
17808 unwind
.opcode_count
++;
17812 /* Add unwind opcodes to adjust the stack pointer. */
17815 add_unwind_adjustsp (offsetT offset
)
17819 if (offset
> 0x200)
17821 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
17826 /* Long form: 0xb2, uleb128. */
17827 /* This might not fit in a word so add the individual bytes,
17828 remembering the list is built in reverse order. */
17829 o
= (valueT
) ((offset
- 0x204) >> 2);
17831 add_unwind_opcode (0, 1);
17833 /* Calculate the uleb128 encoding of the offset. */
17837 bytes
[n
] = o
& 0x7f;
17843 /* Add the insn. */
17845 add_unwind_opcode (bytes
[n
- 1], 1);
17846 add_unwind_opcode (0xb2, 1);
17848 else if (offset
> 0x100)
17850 /* Two short opcodes. */
17851 add_unwind_opcode (0x3f, 1);
17852 op
= (offset
- 0x104) >> 2;
17853 add_unwind_opcode (op
, 1);
17855 else if (offset
> 0)
17857 /* Short opcode. */
17858 op
= (offset
- 4) >> 2;
17859 add_unwind_opcode (op
, 1);
17861 else if (offset
< 0)
17864 while (offset
> 0x100)
17866 add_unwind_opcode (0x7f, 1);
17869 op
= ((offset
- 4) >> 2) | 0x40;
17870 add_unwind_opcode (op
, 1);
17874 /* Finish the list of unwind opcodes for this function. */
17876 finish_unwind_opcodes (void)
17880 if (unwind
.fp_used
)
17882 /* Adjust sp as necessary. */
17883 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
17884 flush_pending_unwind ();
17886 /* After restoring sp from the frame pointer. */
17887 op
= 0x90 | unwind
.fp_reg
;
17888 add_unwind_opcode (op
, 1);
17891 flush_pending_unwind ();
17895 /* Start an exception table entry. If idx is nonzero this is an index table
17899 start_unwind_section (const segT text_seg
, int idx
)
17901 const char * text_name
;
17902 const char * prefix
;
17903 const char * prefix_once
;
17904 const char * group_name
;
17908 size_t sec_name_len
;
17915 prefix
= ELF_STRING_ARM_unwind
;
17916 prefix_once
= ELF_STRING_ARM_unwind_once
;
17917 type
= SHT_ARM_EXIDX
;
17921 prefix
= ELF_STRING_ARM_unwind_info
;
17922 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
17923 type
= SHT_PROGBITS
;
17926 text_name
= segment_name (text_seg
);
17927 if (streq (text_name
, ".text"))
17930 if (strncmp (text_name
, ".gnu.linkonce.t.",
17931 strlen (".gnu.linkonce.t.")) == 0)
17933 prefix
= prefix_once
;
17934 text_name
+= strlen (".gnu.linkonce.t.");
17937 prefix_len
= strlen (prefix
);
17938 text_len
= strlen (text_name
);
17939 sec_name_len
= prefix_len
+ text_len
;
17940 sec_name
= xmalloc (sec_name_len
+ 1);
17941 memcpy (sec_name
, prefix
, prefix_len
);
17942 memcpy (sec_name
+ prefix_len
, text_name
, text_len
);
17943 sec_name
[prefix_len
+ text_len
] = '\0';
17949 /* Handle COMDAT group. */
17950 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
17952 group_name
= elf_group_name (text_seg
);
17953 if (group_name
== NULL
)
17955 as_bad (_("Group section `%s' has no group signature"),
17956 segment_name (text_seg
));
17957 ignore_rest_of_line ();
17960 flags
|= SHF_GROUP
;
17964 obj_elf_change_section (sec_name
, type
, flags
, 0, group_name
, linkonce
, 0);
17966 /* Set the section link for index tables. */
17968 elf_linked_to_section (now_seg
) = text_seg
;
17972 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
17973 personality routine data. Returns zero, or the index table value for
17974 and inline entry. */
17977 create_unwind_entry (int have_data
)
17982 /* The current word of data. */
17984 /* The number of bytes left in this word. */
17987 finish_unwind_opcodes ();
17989 /* Remember the current text section. */
17990 unwind
.saved_seg
= now_seg
;
17991 unwind
.saved_subseg
= now_subseg
;
17993 start_unwind_section (now_seg
, 0);
17995 if (unwind
.personality_routine
== NULL
)
17997 if (unwind
.personality_index
== -2)
18000 as_bad (_("handlerdata in cantunwind frame"));
18001 return 1; /* EXIDX_CANTUNWIND. */
18004 /* Use a default personality routine if none is specified. */
18005 if (unwind
.personality_index
== -1)
18007 if (unwind
.opcode_count
> 3)
18008 unwind
.personality_index
= 1;
18010 unwind
.personality_index
= 0;
18013 /* Space for the personality routine entry. */
18014 if (unwind
.personality_index
== 0)
18016 if (unwind
.opcode_count
> 3)
18017 as_bad (_("too many unwind opcodes for personality routine 0"));
18021 /* All the data is inline in the index table. */
18024 while (unwind
.opcode_count
> 0)
18026 unwind
.opcode_count
--;
18027 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
18031 /* Pad with "finish" opcodes. */
18033 data
= (data
<< 8) | 0xb0;
18040 /* We get two opcodes "free" in the first word. */
18041 size
= unwind
.opcode_count
- 2;
18044 /* An extra byte is required for the opcode count. */
18045 size
= unwind
.opcode_count
+ 1;
18047 size
= (size
+ 3) >> 2;
18049 as_bad (_("too many unwind opcodes"));
18051 frag_align (2, 0, 0);
18052 record_alignment (now_seg
, 2);
18053 unwind
.table_entry
= expr_build_dot ();
18055 /* Allocate the table entry. */
18056 ptr
= frag_more ((size
<< 2) + 4);
18057 where
= frag_now_fix () - ((size
<< 2) + 4);
18059 switch (unwind
.personality_index
)
18062 /* ??? Should this be a PLT generating relocation? */
18063 /* Custom personality routine. */
18064 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
18065 BFD_RELOC_ARM_PREL31
);
18070 /* Set the first byte to the number of additional words. */
18075 /* ABI defined personality routines. */
18077 /* Three opcodes bytes are packed into the first word. */
18084 /* The size and first two opcode bytes go in the first word. */
18085 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
18090 /* Should never happen. */
18094 /* Pack the opcodes into words (MSB first), reversing the list at the same
18096 while (unwind
.opcode_count
> 0)
18100 md_number_to_chars (ptr
, data
, 4);
18105 unwind
.opcode_count
--;
18107 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
18110 /* Finish off the last word. */
18113 /* Pad with "finish" opcodes. */
18115 data
= (data
<< 8) | 0xb0;
18117 md_number_to_chars (ptr
, data
, 4);
18122 /* Add an empty descriptor if there is no user-specified data. */
18123 ptr
= frag_more (4);
18124 md_number_to_chars (ptr
, 0, 4);
18131 /* Initialize the DWARF-2 unwind information for this procedure. */
18134 tc_arm_frame_initial_instructions (void)
18136 cfi_add_CFA_def_cfa (REG_SP
, 0);
18138 #endif /* OBJ_ELF */
18140 /* Convert REGNAME to a DWARF-2 register number. */
18143 tc_arm_regname_to_dw2regnum (char *regname
)
18145 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
18155 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
18159 expr
.X_op
= O_secrel
;
18160 expr
.X_add_symbol
= symbol
;
18161 expr
.X_add_number
= 0;
18162 emit_expr (&expr
, size
);
18166 /* MD interface: Symbol and relocation handling. */
18168 /* Return the address within the segment that a PC-relative fixup is
18169 relative to. For ARM, PC-relative fixups applied to instructions
18170 are generally relative to the location of the fixup plus 8 bytes.
18171 Thumb branches are offset by 4, and Thumb loads relative to PC
18172 require special handling. */
18175 md_pcrel_from_section (fixS
* fixP
, segT seg
)
18177 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
18179 /* If this is pc-relative and we are going to emit a relocation
18180 then we just want to put out any pipeline compensation that the linker
18181 will need. Otherwise we want to use the calculated base.
18182 For WinCE we skip the bias for externals as well, since this
18183 is how the MS ARM-CE assembler behaves and we want to be compatible. */
18185 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
18186 || (arm_force_relocation (fixP
)
18188 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
18193 switch (fixP
->fx_r_type
)
18195 /* PC relative addressing on the Thumb is slightly odd as the
18196 bottom two bits of the PC are forced to zero for the
18197 calculation. This happens *after* application of the
18198 pipeline offset. However, Thumb adrl already adjusts for
18199 this, so we need not do it again. */
18200 case BFD_RELOC_ARM_THUMB_ADD
:
18203 case BFD_RELOC_ARM_THUMB_OFFSET
:
18204 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
18205 case BFD_RELOC_ARM_T32_ADD_PC12
:
18206 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
18207 return (base
+ 4) & ~3;
18209 /* Thumb branches are simply offset by +4. */
18210 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
18211 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
18212 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
18213 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
18214 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
18215 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
18216 case BFD_RELOC_THUMB_PCREL_BLX
:
18219 /* ARM mode branches are offset by +8. However, the Windows CE
18220 loader expects the relocation not to take this into account. */
18221 case BFD_RELOC_ARM_PCREL_BRANCH
:
18222 case BFD_RELOC_ARM_PCREL_CALL
:
18223 case BFD_RELOC_ARM_PCREL_JUMP
:
18224 case BFD_RELOC_ARM_PCREL_BLX
:
18225 case BFD_RELOC_ARM_PLT32
:
18227 /* When handling fixups immediately, because we have already
18228 discovered the value of a symbol, or the address of the frag involved
18229 we must account for the offset by +8, as the OS loader will never see the reloc.
18230 see fixup_segment() in write.c
18231 The S_IS_EXTERNAL test handles the case of global symbols.
18232 Those need the calculated base, not just the pipe compensation the linker will need. */
18234 && fixP
->fx_addsy
!= NULL
18235 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
18236 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
18243 /* ARM mode loads relative to PC are also offset by +8. Unlike
18244 branches, the Windows CE loader *does* expect the relocation
18245 to take this into account. */
18246 case BFD_RELOC_ARM_OFFSET_IMM
:
18247 case BFD_RELOC_ARM_OFFSET_IMM8
:
18248 case BFD_RELOC_ARM_HWLITERAL
:
18249 case BFD_RELOC_ARM_LITERAL
:
18250 case BFD_RELOC_ARM_CP_OFF_IMM
:
18254 /* Other PC-relative relocations are un-offset. */
18260 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
18261 Otherwise we have no need to default values of symbols. */
18264 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
18267 if (name
[0] == '_' && name
[1] == 'G'
18268 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
18272 if (symbol_find (name
))
18273 as_bad (_("GOT already in the symbol table"));
18275 GOT_symbol
= symbol_new (name
, undefined_section
,
18276 (valueT
) 0, & zero_address_frag
);
18286 /* Subroutine of md_apply_fix. Check to see if an immediate can be
18287 computed as two separate immediate values, added together. We
18288 already know that this value cannot be computed by just one ARM
18291 static unsigned int
18292 validate_immediate_twopart (unsigned int val
,
18293 unsigned int * highpart
)
18298 for (i
= 0; i
< 32; i
+= 2)
18299 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
18305 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
18307 else if (a
& 0xff0000)
18309 if (a
& 0xff000000)
18311 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
18315 assert (a
& 0xff000000);
18316 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
18319 return (a
& 0xff) | (i
<< 7);
18326 validate_offset_imm (unsigned int val
, int hwse
)
18328 if ((hwse
&& val
> 255) || val
> 4095)
18333 /* Subroutine of md_apply_fix. Do those data_ops which can take a
18334 negative immediate constant by altering the instruction. A bit of
18339 by inverting the second operand, and
18342 by negating the second operand. */
18345 negate_data_op (unsigned long * instruction
,
18346 unsigned long value
)
18349 unsigned long negated
, inverted
;
18351 negated
= encode_arm_immediate (-value
);
18352 inverted
= encode_arm_immediate (~value
);
18354 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
18357 /* First negates. */
18358 case OPCODE_SUB
: /* ADD <-> SUB */
18359 new_inst
= OPCODE_ADD
;
18364 new_inst
= OPCODE_SUB
;
18368 case OPCODE_CMP
: /* CMP <-> CMN */
18369 new_inst
= OPCODE_CMN
;
18374 new_inst
= OPCODE_CMP
;
18378 /* Now Inverted ops. */
18379 case OPCODE_MOV
: /* MOV <-> MVN */
18380 new_inst
= OPCODE_MVN
;
18385 new_inst
= OPCODE_MOV
;
18389 case OPCODE_AND
: /* AND <-> BIC */
18390 new_inst
= OPCODE_BIC
;
18395 new_inst
= OPCODE_AND
;
18399 case OPCODE_ADC
: /* ADC <-> SBC */
18400 new_inst
= OPCODE_SBC
;
18405 new_inst
= OPCODE_ADC
;
18409 /* We cannot do anything. */
18414 if (value
== (unsigned) FAIL
)
18417 *instruction
&= OPCODE_MASK
;
18418 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
18422 /* Like negate_data_op, but for Thumb-2. */
18424 static unsigned int
18425 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
18429 unsigned int negated
, inverted
;
18431 negated
= encode_thumb32_immediate (-value
);
18432 inverted
= encode_thumb32_immediate (~value
);
18434 rd
= (*instruction
>> 8) & 0xf;
18435 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
18438 /* ADD <-> SUB. Includes CMP <-> CMN. */
18439 case T2_OPCODE_SUB
:
18440 new_inst
= T2_OPCODE_ADD
;
18444 case T2_OPCODE_ADD
:
18445 new_inst
= T2_OPCODE_SUB
;
18449 /* ORR <-> ORN. Includes MOV <-> MVN. */
18450 case T2_OPCODE_ORR
:
18451 new_inst
= T2_OPCODE_ORN
;
18455 case T2_OPCODE_ORN
:
18456 new_inst
= T2_OPCODE_ORR
;
18460 /* AND <-> BIC. TST has no inverted equivalent. */
18461 case T2_OPCODE_AND
:
18462 new_inst
= T2_OPCODE_BIC
;
18469 case T2_OPCODE_BIC
:
18470 new_inst
= T2_OPCODE_AND
;
18475 case T2_OPCODE_ADC
:
18476 new_inst
= T2_OPCODE_SBC
;
18480 case T2_OPCODE_SBC
:
18481 new_inst
= T2_OPCODE_ADC
;
18485 /* We cannot do anything. */
18490 if (value
== (unsigned int)FAIL
)
18493 *instruction
&= T2_OPCODE_MASK
;
18494 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
18498 /* Read a 32-bit thumb instruction from buf. */
18499 static unsigned long
18500 get_thumb32_insn (char * buf
)
18502 unsigned long insn
;
18503 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
18504 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
18510 /* We usually want to set the low bit on the address of thumb function
18511 symbols. In particular .word foo - . should have the low bit set.
18512 Generic code tries to fold the difference of two symbols to
18513 a constant. Prevent this and force a relocation when the first symbols
18514 is a thumb function. */
18516 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
18518 if (op
== O_subtract
18519 && l
->X_op
== O_symbol
18520 && r
->X_op
== O_symbol
18521 && THUMB_IS_FUNC (l
->X_add_symbol
))
18523 l
->X_op
= O_subtract
;
18524 l
->X_op_symbol
= r
->X_add_symbol
;
18525 l
->X_add_number
-= r
->X_add_number
;
18528 /* Process as normal. */
18533 md_apply_fix (fixS
* fixP
,
18537 offsetT value
= * valP
;
18539 unsigned int newimm
;
18540 unsigned long temp
;
18542 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
18544 assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
18546 /* Note whether this will delete the relocation. */
18548 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
18551 /* On a 64-bit host, silently truncate 'value' to 32 bits for
18552 consistency with the behaviour on 32-bit hosts. Remember value
18554 value
&= 0xffffffff;
18555 value
^= 0x80000000;
18556 value
-= 0x80000000;
18559 fixP
->fx_addnumber
= value
;
18561 /* Same treatment for fixP->fx_offset. */
18562 fixP
->fx_offset
&= 0xffffffff;
18563 fixP
->fx_offset
^= 0x80000000;
18564 fixP
->fx_offset
-= 0x80000000;
18566 switch (fixP
->fx_r_type
)
18568 case BFD_RELOC_NONE
:
18569 /* This will need to go in the object file. */
18573 case BFD_RELOC_ARM_IMMEDIATE
:
18574 /* We claim that this fixup has been processed here,
18575 even if in fact we generate an error because we do
18576 not have a reloc for it, so tc_gen_reloc will reject it. */
18580 && ! S_IS_DEFINED (fixP
->fx_addsy
))
18582 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18583 _("undefined symbol %s used as an immediate value"),
18584 S_GET_NAME (fixP
->fx_addsy
));
18589 && S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
18591 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18592 _("symbol %s is in a different section"),
18593 S_GET_NAME (fixP
->fx_addsy
));
18597 newimm
= encode_arm_immediate (value
);
18598 temp
= md_chars_to_number (buf
, INSN_SIZE
);
18600 /* If the instruction will fail, see if we can fix things up by
18601 changing the opcode. */
18602 if (newimm
== (unsigned int) FAIL
18603 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
18605 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18606 _("invalid constant (%lx) after fixup"),
18607 (unsigned long) value
);
18611 newimm
|= (temp
& 0xfffff000);
18612 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
18615 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
18617 unsigned int highpart
= 0;
18618 unsigned int newinsn
= 0xe1a00000; /* nop. */
18621 && ! S_IS_DEFINED (fixP
->fx_addsy
))
18623 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18624 _("undefined symbol %s used as an immediate value"),
18625 S_GET_NAME (fixP
->fx_addsy
));
18630 && S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
18632 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18633 _("symbol %s is in a different section"),
18634 S_GET_NAME (fixP
->fx_addsy
));
18638 newimm
= encode_arm_immediate (value
);
18639 temp
= md_chars_to_number (buf
, INSN_SIZE
);
18641 /* If the instruction will fail, see if we can fix things up by
18642 changing the opcode. */
18643 if (newimm
== (unsigned int) FAIL
18644 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
18646 /* No ? OK - try using two ADD instructions to generate
18648 newimm
= validate_immediate_twopart (value
, & highpart
);
18650 /* Yes - then make sure that the second instruction is
18652 if (newimm
!= (unsigned int) FAIL
)
18654 /* Still No ? Try using a negated value. */
18655 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
18656 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
18657 /* Otherwise - give up. */
18660 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18661 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
18666 /* Replace the first operand in the 2nd instruction (which
18667 is the PC) with the destination register. We have
18668 already added in the PC in the first instruction and we
18669 do not want to do it again. */
18670 newinsn
&= ~ 0xf0000;
18671 newinsn
|= ((newinsn
& 0x0f000) << 4);
18674 newimm
|= (temp
& 0xfffff000);
18675 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
18677 highpart
|= (newinsn
& 0xfffff000);
18678 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
18682 case BFD_RELOC_ARM_OFFSET_IMM
:
18683 if (!fixP
->fx_done
&& seg
->use_rela_p
)
18686 case BFD_RELOC_ARM_LITERAL
:
18692 if (validate_offset_imm (value
, 0) == FAIL
)
18694 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
18695 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18696 _("invalid literal constant: pool needs to be closer"));
18698 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18699 _("bad immediate value for offset (%ld)"),
18704 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18705 newval
&= 0xff7ff000;
18706 newval
|= value
| (sign
? INDEX_UP
: 0);
18707 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18710 case BFD_RELOC_ARM_OFFSET_IMM8
:
18711 case BFD_RELOC_ARM_HWLITERAL
:
18717 if (validate_offset_imm (value
, 1) == FAIL
)
18719 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
18720 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18721 _("invalid literal constant: pool needs to be closer"));
18723 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
18728 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18729 newval
&= 0xff7ff0f0;
18730 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
18731 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18734 case BFD_RELOC_ARM_T32_OFFSET_U8
:
18735 if (value
< 0 || value
> 1020 || value
% 4 != 0)
18736 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18737 _("bad immediate value for offset (%ld)"), (long) value
);
18740 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
18742 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
18745 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
18746 /* This is a complicated relocation used for all varieties of Thumb32
18747 load/store instruction with immediate offset:
18749 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
18750 *4, optional writeback(W)
18751 (doubleword load/store)
18753 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
18754 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
18755 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
18756 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
18757 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
18759 Uppercase letters indicate bits that are already encoded at
18760 this point. Lowercase letters are our problem. For the
18761 second block of instructions, the secondary opcode nybble
18762 (bits 8..11) is present, and bit 23 is zero, even if this is
18763 a PC-relative operation. */
18764 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18766 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
18768 if ((newval
& 0xf0000000) == 0xe0000000)
18770 /* Doubleword load/store: 8-bit offset, scaled by 4. */
18772 newval
|= (1 << 23);
18775 if (value
% 4 != 0)
18777 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18778 _("offset not a multiple of 4"));
18784 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18785 _("offset out of range"));
18790 else if ((newval
& 0x000f0000) == 0x000f0000)
18792 /* PC-relative, 12-bit offset. */
18794 newval
|= (1 << 23);
18799 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18800 _("offset out of range"));
18805 else if ((newval
& 0x00000100) == 0x00000100)
18807 /* Writeback: 8-bit, +/- offset. */
18809 newval
|= (1 << 9);
18814 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18815 _("offset out of range"));
18820 else if ((newval
& 0x00000f00) == 0x00000e00)
18822 /* T-instruction: positive 8-bit offset. */
18823 if (value
< 0 || value
> 0xff)
18825 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18826 _("offset out of range"));
18834 /* Positive 12-bit or negative 8-bit offset. */
18838 newval
|= (1 << 23);
18848 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18849 _("offset out of range"));
18856 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
18857 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
18860 case BFD_RELOC_ARM_SHIFT_IMM
:
18861 newval
= md_chars_to_number (buf
, INSN_SIZE
);
18862 if (((unsigned long) value
) > 32
18864 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
18866 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18867 _("shift expression is too large"));
18872 /* Shifts of zero must be done as lsl. */
18874 else if (value
== 32)
18876 newval
&= 0xfffff07f;
18877 newval
|= (value
& 0x1f) << 7;
18878 md_number_to_chars (buf
, newval
, INSN_SIZE
);
18881 case BFD_RELOC_ARM_T32_IMMEDIATE
:
18882 case BFD_RELOC_ARM_T32_ADD_IMM
:
18883 case BFD_RELOC_ARM_T32_IMM12
:
18884 case BFD_RELOC_ARM_T32_ADD_PC12
:
18885 /* We claim that this fixup has been processed here,
18886 even if in fact we generate an error because we do
18887 not have a reloc for it, so tc_gen_reloc will reject it. */
18891 && ! S_IS_DEFINED (fixP
->fx_addsy
))
18893 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18894 _("undefined symbol %s used as an immediate value"),
18895 S_GET_NAME (fixP
->fx_addsy
));
18899 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
18901 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
18904 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
18905 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
18907 newimm
= encode_thumb32_immediate (value
);
18908 if (newimm
== (unsigned int) FAIL
)
18909 newimm
= thumb32_negate_data_op (&newval
, value
);
18911 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
18912 && newimm
== (unsigned int) FAIL
)
18914 /* Turn add/sum into addw/subw. */
18915 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
18916 newval
= (newval
& 0xfeffffff) | 0x02000000;
18918 /* 12 bit immediate for addw/subw. */
18922 newval
^= 0x00a00000;
18925 newimm
= (unsigned int) FAIL
;
18930 if (newimm
== (unsigned int)FAIL
)
18932 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
18933 _("invalid constant (%lx) after fixup"),
18934 (unsigned long) value
);
18938 newval
|= (newimm
& 0x800) << 15;
18939 newval
|= (newimm
& 0x700) << 4;
18940 newval
|= (newimm
& 0x0ff);
18942 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
18943 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
18946     case BFD_RELOC_ARM_SMC:
18947       if (((unsigned long) value) > 0xffff)
18948         as_bad_where (fixP->fx_file, fixP->fx_line,
18949                       _("invalid smc expression"));
18950       newval = md_chars_to_number (buf, INSN_SIZE);
18951       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
18952       md_number_to_chars (buf, newval, INSN_SIZE);
18955     case BFD_RELOC_ARM_SWI:
18956       if (fixP->tc_fix_data != 0)
18958           if (((unsigned long) value) > 0xff)
18959             as_bad_where (fixP->fx_file, fixP->fx_line,
18960                           _("invalid swi expression"));
18961           newval = md_chars_to_number (buf, THUMB_SIZE);
18963           md_number_to_chars (buf, newval, THUMB_SIZE);
18967           if (((unsigned long) value) > 0x00ffffff)
18968             as_bad_where (fixP->fx_file, fixP->fx_line,
18969                           _("invalid swi expression"));
18970           newval = md_chars_to_number (buf, INSN_SIZE);
18972           md_number_to_chars (buf, newval, INSN_SIZE);
18976     case BFD_RELOC_ARM_MULTI:
18977       if (((unsigned long) value) > 0xffff)
18978         as_bad_where (fixP->fx_file, fixP->fx_line,
18979                       _("invalid expression in load/store multiple"));
18980       newval = value | md_chars_to_number (buf, INSN_SIZE);
18981       md_number_to_chars (buf, newval, INSN_SIZE);
18985     case BFD_RELOC_ARM_PCREL_CALL:
18986       newval = md_chars_to_number (buf, INSN_SIZE);
18987       if ((newval & 0xf0000000) == 0xf0000000)
18991       goto arm_branch_common;
18993     case BFD_RELOC_ARM_PCREL_JUMP:
18994     case BFD_RELOC_ARM_PLT32:
18996     case BFD_RELOC_ARM_PCREL_BRANCH:
18998       goto arm_branch_common;
19000     case BFD_RELOC_ARM_PCREL_BLX:
19003       /* We are going to store value (shifted right by two) in the
19004          instruction, in a 24 bit, signed field.  Bits 26 through 32 either
19005          all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
19006          also be clear.  */
19008         as_bad_where (fixP->fx_file, fixP->fx_line,
19009                       _("misaligned branch destination"));
19010       if ((value & (offsetT)0xfe000000) != (offsetT)0
19011           && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
19012         as_bad_where (fixP->fx_file, fixP->fx_line,
19013                       _("branch out of range"));
19015       if (fixP->fx_done || !seg->use_rela_p)
19017           newval = md_chars_to_number (buf, INSN_SIZE);
19018           newval |= (value >> 2) & 0x00ffffff;
19019           /* Set the H bit on BLX instructions.  */
19023             newval |= 0x01000000;
19025             newval &= ~0x01000000;
19027           md_number_to_chars (buf, newval, INSN_SIZE);
19031     case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
19032       /* CBZ can only branch forward.  */
19034       /* Attempts to use CBZ to branch to the next instruction
19035          (which, strictly speaking, are prohibited) will be turned into
19038          FIXME: It may be better to remove the instruction completely and
19039          perform relaxation.  */
19042           newval = md_chars_to_number (buf, THUMB_SIZE);
19043           newval = 0xbf00; /* NOP encoding T1 */
19044           md_number_to_chars (buf, newval, THUMB_SIZE);
19049             as_bad_where (fixP->fx_file, fixP->fx_line,
19050                           _("branch out of range"));
19052           if (fixP->fx_done || !seg->use_rela_p)
19054               newval = md_chars_to_number (buf, THUMB_SIZE);
19055               newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
19056               md_number_to_chars (buf, newval, THUMB_SIZE);
19061     case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
19062       if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
19063         as_bad_where (fixP->fx_file, fixP->fx_line,
19064                       _("branch out of range"));
19066       if (fixP->fx_done || !seg->use_rela_p)
19068           newval = md_chars_to_number (buf, THUMB_SIZE);
19069           newval |= (value & 0x1ff) >> 1;
19070           md_number_to_chars (buf, newval, THUMB_SIZE);
19074     case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
19075       if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
19076         as_bad_where (fixP->fx_file, fixP->fx_line,
19077                       _("branch out of range"));
19079       if (fixP->fx_done || !seg->use_rela_p)
19081           newval = md_chars_to_number (buf, THUMB_SIZE);
19082           newval |= (value & 0xfff) >> 1;
19083           md_number_to_chars (buf, newval, THUMB_SIZE);
19087     case BFD_RELOC_THUMB_PCREL_BRANCH20:
19088       if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
19089         as_bad_where (fixP->fx_file, fixP->fx_line,
19090                       _("conditional branch out of range"));
19092       if (fixP->fx_done || !seg->use_rela_p)
19095           addressT S, J1, J2, lo, hi;
19097           S  = (value & 0x00100000) >> 20;
19098           J2 = (value & 0x00080000) >> 19;
19099           J1 = (value & 0x00040000) >> 18;
19100           hi = (value & 0x0003f000) >> 12;
19101           lo = (value & 0x00000ffe) >> 1;
19103           newval  = md_chars_to_number (buf, THUMB_SIZE);
19104           newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19105           newval  |= (S << 10) | hi;
19106           newval2 |= (J1 << 13) | (J2 << 11) | lo;
19107           md_number_to_chars (buf, newval, THUMB_SIZE);
19108           md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
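
/* Illustrative sketch, not part of the original sources: the 20-bit
   conditional-branch offset handled above is scattered across the two
   halfwords of the Thumb-2 encoding -- S and imm6 into the first, J1,
   J2 and imm11 into the second.  The helper below (invented name)
   mirrors that placement.  */
static void
t32_cond_branch_pack_sketch (unsigned long offset,
                             unsigned int *hw1, unsigned int *hw2)
{
  unsigned int S  = (offset & 0x00100000) >> 20;
  unsigned int J2 = (offset & 0x00080000) >> 19;
  unsigned int J1 = (offset & 0x00040000) >> 18;
  unsigned int hi = (offset & 0x0003f000) >> 12;
  unsigned int lo = (offset & 0x00000ffe) >> 1;

  *hw1 |= (S << 10) | hi;
  *hw2 |= (J1 << 13) | (J2 << 11) | lo;
}
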
19112     case BFD_RELOC_THUMB_PCREL_BLX:
19113     case BFD_RELOC_THUMB_PCREL_BRANCH23:
19114       if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
19115         as_bad_where (fixP->fx_file, fixP->fx_line,
19116                       _("branch out of range"));
19118       if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
19119         /* For a BLX instruction, make sure that the relocation is rounded up
19120            to a word boundary.  This follows the semantics of the instruction
19121            which specifies that bit 1 of the target address will come from bit
19122            1 of the base address.  */
19123         value = (value + 1) & ~ 1;
19125       if (fixP->fx_done || !seg->use_rela_p)
19129           newval  = md_chars_to_number (buf, THUMB_SIZE);
19130           newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19131           newval  |= (value & 0x7fffff) >> 12;
19132           newval2 |= (value & 0xfff) >> 1;
19133           md_number_to_chars (buf, newval, THUMB_SIZE);
19134           md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19138     case BFD_RELOC_THUMB_PCREL_BRANCH25:
19139       if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
19140         as_bad_where (fixP->fx_file, fixP->fx_line,
19141                       _("branch out of range"));
19143       if (fixP->fx_done || !seg->use_rela_p)
19146           addressT S, I1, I2, lo, hi;
19148           S  = (value & 0x01000000) >> 24;
19149           I1 = (value & 0x00800000) >> 23;
19150           I2 = (value & 0x00400000) >> 22;
19151           hi = (value & 0x003ff000) >> 12;
19152           lo = (value & 0x00000ffe) >> 1;
19157           newval  = md_chars_to_number (buf, THUMB_SIZE);
19158           newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
19159           newval  |= (S << 10) | hi;
19160           newval2 |= (I1 << 13) | (I2 << 11) | lo;
19161           md_number_to_chars (buf, newval, THUMB_SIZE);
19162           md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
19167       if (fixP->fx_done || !seg->use_rela_p)
19168         md_number_to_chars (buf, value, 1);
19172       if (fixP->fx_done || !seg->use_rela_p)
19173         md_number_to_chars (buf, value, 2);
19177     case BFD_RELOC_ARM_TLS_GD32:
19178     case BFD_RELOC_ARM_TLS_LE32:
19179     case BFD_RELOC_ARM_TLS_IE32:
19180     case BFD_RELOC_ARM_TLS_LDM32:
19181     case BFD_RELOC_ARM_TLS_LDO32:
19182       S_SET_THREAD_LOCAL (fixP->fx_addsy);
19185     case BFD_RELOC_ARM_GOT32:
19186     case BFD_RELOC_ARM_GOTOFF:
19187     case BFD_RELOC_ARM_TARGET2:
19188       if (fixP->fx_done || !seg->use_rela_p)
19189         md_number_to_chars (buf, 0, 4);
19193     case BFD_RELOC_RVA:
19195     case BFD_RELOC_ARM_TARGET1:
19196     case BFD_RELOC_ARM_ROSEGREL32:
19197     case BFD_RELOC_ARM_SBREL32:
19198     case BFD_RELOC_32_PCREL:
19200     case BFD_RELOC_32_SECREL:
19202       if (fixP->fx_done || !seg->use_rela_p)
19204         /* For WinCE we only do this for pcrel fixups.  */
19205         if (fixP->fx_done || fixP->fx_pcrel)
19207           md_number_to_chars (buf, value, 4);
19211     case BFD_RELOC_ARM_PREL31:
19212       if (fixP->fx_done || !seg->use_rela_p)
19214           newval = md_chars_to_number (buf, 4) & 0x80000000;
19215           if ((value ^ (value >> 1)) & 0x40000000)
19217             as_bad_where (fixP->fx_file, fixP->fx_line,
19218                           _("rel31 relocation overflow"));
19220           newval |= value & 0x7fffffff;
19221           md_number_to_chars (buf, newval, 4);
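
/* Illustrative sketch, not part of the original sources: the overflow
   test above relies on the fact that a value fits a signed 31-bit field
   exactly when bits 30 and 31 agree; XOR-ing the value with itself
   shifted right by one moves any disagreement into bit 30.  Stated
   directly (helper name invented):  */
static int
prel31_overflows_sketch (offsetT v)
{
  return ((v >> 30) & 1) != ((v >> 31) & 1);
}
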
19226     case BFD_RELOC_ARM_CP_OFF_IMM:
19227     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
19228       if (value < -1023 || value > 1023 || (value & 3))
19229         as_bad_where (fixP->fx_file, fixP->fx_line,
19230                       _("co-processor offset out of range"));
19235       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
19236           || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
19237         newval = md_chars_to_number (buf, INSN_SIZE);
19239         newval = get_thumb32_insn (buf);
19240       newval &= 0xff7fff00;
19241       newval |= (value >> 2) | (sign ? INDEX_UP : 0);
19242       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
19243           || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
19244         md_number_to_chars (buf, newval, INSN_SIZE);
19246         put_thumb32_insn (buf, newval);
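
/* Illustrative sketch, not part of the original sources: assuming
   INDEX_UP is the add/subtract (U) bit of the coprocessor load/store
   encodings, the folding above stores the byte offset -- a signed
   multiple of four in -1020..1020 -- as a word count in the low eight
   bits plus a direction bit.  Helper name invented.  */
static unsigned int
cp_off_imm_pack_sketch (long offset)
{
  long mag = offset < 0 ? -offset : offset;
  return (offset >= 0 ? INDEX_UP : 0) | (unsigned int) (mag >> 2);
}
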
19249     case BFD_RELOC_ARM_CP_OFF_IMM_S2:
19250     case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
19251       if (value < -255 || value > 255)
19252         as_bad_where (fixP->fx_file, fixP->fx_line,
19253                       _("co-processor offset out of range"));
19255       goto cp_off_common;
19257     case BFD_RELOC_ARM_THUMB_OFFSET:
19258       newval = md_chars_to_number (buf, THUMB_SIZE);
19259       /* Exactly what ranges, and where the offset is inserted, depends
19260          on the type of instruction; we can establish this from the
19262       switch (newval >> 12)
19264         case 4: /* PC load.  */
19265           /* Thumb PC loads are somewhat odd, bit 1 of the PC is
19266              forced to zero for these loads; md_pcrel_from has already
19267              compensated for this.  */
19269             as_bad_where (fixP->fx_file, fixP->fx_line,
19270                           _("invalid offset, target not word aligned (0x%08lX)"),
19271                           (((unsigned long) fixP->fx_frag->fr_address
19272                             + (unsigned long) fixP->fx_where) & ~3)
19273                           + (unsigned long) value);
19275           if (value & ~0x3fc)
19276             as_bad_where (fixP->fx_file, fixP->fx_line,
19277                           _("invalid offset, value too big (0x%08lX)"),
19280           newval |= value >> 2;
19283         case 9: /* SP load/store.  */
19284           if (value & ~0x3fc)
19285             as_bad_where (fixP->fx_file, fixP->fx_line,
19286                           _("invalid offset, value too big (0x%08lX)"),
19288           newval |= value >> 2;
19291         case 6: /* Word load/store.  */
19293             as_bad_where (fixP->fx_file, fixP->fx_line,
19294                           _("invalid offset, value too big (0x%08lX)"),
19296           newval |= value << 4; /* 6 - 2.  */
19299         case 7: /* Byte load/store.  */
19301             as_bad_where (fixP->fx_file, fixP->fx_line,
19302                           _("invalid offset, value too big (0x%08lX)"),
19304           newval |= value << 6;
19307         case 8: /* Halfword load/store.  */
19309             as_bad_where (fixP->fx_file, fixP->fx_line,
19310                           _("invalid offset, value too big (0x%08lX)"),
19312           newval |= value << 5; /* 6 - 1.  */
19316           as_bad_where (fixP->fx_file, fixP->fx_line,
19317                         "Unable to process relocation for thumb opcode: %lx",
19318                         (unsigned long) newval);
19321       md_number_to_chars (buf, newval, THUMB_SIZE);
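
/* Illustrative sketch, not part of the original sources: the scaling
   applied above.  Each Thumb load/store class stores its offset divided
   by the access size; the PC- and SP-relative forms keep the scaled
   offset in bits 0..7, the register-offset forms in bits 6..10.  The
   helper name and its opclass parameter (the top four opcode bits) are
   invented for illustration.  */
static unsigned int
thumb_offset_field_sketch (unsigned int opclass, unsigned int offset)
{
  switch (opclass)
    {
    case 4:                             /* PC-relative load.  */
    case 9:                             /* SP load/store.  */
      return offset >> 2;               /* Word scaled, bits 0..7.  */
    case 6:                             /* Word load/store.  */
      return (offset >> 2) << 6;
    case 8:                             /* Halfword load/store.  */
      return (offset >> 1) << 6;
    case 7:                             /* Byte load/store.  */
      return offset << 6;
    default:
      return 0;
    }
}
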
19324     case BFD_RELOC_ARM_THUMB_ADD:
19325       /* This is a complicated relocation, since we use it for all of
19326          the following immediate relocations:
19330             9bit   ADD/SUB SP word-aligned
19331            10bit   ADD PC/SP word-aligned
19333          The type of instruction being processed is encoded in the
19340       newval = md_chars_to_number (buf, THUMB_SIZE);
19342         int rd = (newval >> 4) & 0xf;
19343         int rs = newval & 0xf;
19344         int subtract = !!(newval & 0x8000);
19346         /* Check for HI regs, only very restricted cases allowed:
19347            Adjusting SP, and using PC or SP to get an address.  */
19348         if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
19349             || (rs > 7 && rs != REG_SP && rs != REG_PC))
19350           as_bad_where (fixP->fx_file, fixP->fx_line,
19351                         _("invalid Hi register with immediate"));
19353         /* If value is negative, choose the opposite instruction.  */
19357             subtract = !subtract;
19359             as_bad_where (fixP->fx_file, fixP->fx_line,
19360                           _("immediate value out of range"));
19365             if (value & ~0x1fc)
19366               as_bad_where (fixP->fx_file, fixP->fx_line,
19367                             _("invalid immediate for stack address calculation"));
19368             newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
19369             newval |= value >> 2;
19371         else if (rs == REG_PC || rs == REG_SP)
19373             if (subtract || value & ~0x3fc)
19374               as_bad_where (fixP->fx_file, fixP->fx_line,
19375                             _("invalid immediate for address calculation (value = 0x%08lX)"),
19376                             (unsigned long) value);
19377             newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
19379             newval |= value >> 2;
19384               as_bad_where (fixP->fx_file, fixP->fx_line,
19385                             _("immediate value out of range"));
19386             newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
19387             newval |= (rd << 8) | value;
19392               as_bad_where (fixP->fx_file, fixP->fx_line,
19393                             _("immediate value out of range"));
19394             newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
19395             newval |= rd | (rs << 3) | (value << 6);
19398       md_number_to_chars (buf, newval, THUMB_SIZE);
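
/* Illustrative sketch, not part of the original sources: the immediate
   placement used by the four Thumb encodings selected above, assuming
   the T_OPCODE_* constants already carry each form's fixed bits.  The
   helper name is invented.  */
static unsigned int
thumb_add_imm_pack_sketch (unsigned int opcode, int rd, int rs,
                           unsigned int imm)
{
  if (opcode == T_OPCODE_ADD_ST || opcode == T_OPCODE_SUB_ST
      || opcode == T_OPCODE_ADD_PC || opcode == T_OPCODE_ADD_SP)
    return opcode | (imm >> 2);                 /* Word-aligned SP/PC forms.  */
  if (opcode == T_OPCODE_ADD_I8 || opcode == T_OPCODE_SUB_I8)
    return opcode | (rd << 8) | imm;            /* 8-bit immediate, Rd only.  */
  return opcode | rd | (rs << 3) | (imm << 6);  /* 3-bit immediate, Rd and Rs.  */
}
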
19401     case BFD_RELOC_ARM_THUMB_IMM:
19402       newval = md_chars_to_number (buf, THUMB_SIZE);
19403       if (value < 0 || value > 255)
19404         as_bad_where (fixP->fx_file, fixP->fx_line,
19405                       _("invalid immediate: %ld is out of range"),
19408       md_number_to_chars (buf, newval, THUMB_SIZE);
19411     case BFD_RELOC_ARM_THUMB_SHIFT:
19412       /* 5bit shift value (0..32).  LSL cannot take 32.  */
19413       newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
19414       temp = newval & 0xf800;
19415       if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
19416         as_bad_where (fixP->fx_file, fixP->fx_line,
19417                       _("invalid shift value: %ld"), (long) value);
19418       /* Shifts of zero must be encoded as LSL.  */
19420         newval = (newval & 0x003f) | T_OPCODE_LSL_I;
19421       /* Shifts of 32 are encoded as zero.  */
19422       else if (value == 32)
19424         newval |= value << 6;
19425       md_number_to_chars (buf, newval, THUMB_SIZE);
19428     case BFD_RELOC_VTABLE_INHERIT:
19429     case BFD_RELOC_VTABLE_ENTRY:
19433     case BFD_RELOC_ARM_MOVW:
19434     case BFD_RELOC_ARM_MOVT:
19435     case BFD_RELOC_ARM_THUMB_MOVW:
19436     case BFD_RELOC_ARM_THUMB_MOVT:
19437       if (fixP->fx_done || !seg->use_rela_p)
19439           /* REL format relocations are limited to a 16-bit addend.  */
19440           if (!fixP->fx_done)
19442               if (value < -0x8000 || value > 0x7fff)
19443                 as_bad_where (fixP->fx_file, fixP->fx_line,
19444                               _("offset out of range"));
19446               else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
19447                        || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
19452           if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
19453               || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
19455               newval = get_thumb32_insn (buf);
19456               newval &= 0xfbf08f00;
19457               newval |= (value & 0xf000) << 4;
19458               newval |= (value & 0x0800) << 15;
19459               newval |= (value & 0x0700) << 4;
19460               newval |= (value & 0x00ff);
19461               put_thumb32_insn (buf, newval);
19465               newval = md_chars_to_number (buf, 4);
19466               newval &= 0xfff0f000;
19467               newval |= value & 0x0fff;
19468               newval |= (value & 0xf000) << 4;
19469               md_number_to_chars (buf, newval, 4);
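
/* Illustrative sketch, not part of the original sources: the two
   MOVW/MOVT field layouts written above.  The ARM encoding keeps imm4
   in bits 16..19 and imm12 in bits 0..11; the Thumb-2 encoding splits
   the same 16-bit value into imm4, i, imm3 and imm8.  Helper names are
   invented.  */
static unsigned int
arm_mov16_pack_sketch (unsigned int imm16)
{
  return ((imm16 & 0xf000) << 4) | (imm16 & 0x0fff);
}

static unsigned int
t32_mov16_pack_sketch (unsigned int imm16)
{
  return ((imm16 & 0xf000) << 4)        /* imm4 -> bits 16..19.  */
         | ((imm16 & 0x0800) << 15)     /* i    -> bit 26.       */
         | ((imm16 & 0x0700) << 4)      /* imm3 -> bits 12..14.  */
         | (imm16 & 0x00ff);            /* imm8 -> bits 0..7.    */
}
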
19474     case BFD_RELOC_ARM_ALU_PC_G0_NC:
19475     case BFD_RELOC_ARM_ALU_PC_G0:
19476     case BFD_RELOC_ARM_ALU_PC_G1_NC:
19477     case BFD_RELOC_ARM_ALU_PC_G1:
19478     case BFD_RELOC_ARM_ALU_PC_G2:
19479     case BFD_RELOC_ARM_ALU_SB_G0_NC:
19480     case BFD_RELOC_ARM_ALU_SB_G0:
19481     case BFD_RELOC_ARM_ALU_SB_G1_NC:
19482     case BFD_RELOC_ARM_ALU_SB_G1:
19483     case BFD_RELOC_ARM_ALU_SB_G2:
19484       assert (!fixP->fx_done);
19485       if (!seg->use_rela_p)
19488           bfd_vma encoded_addend;
19489           bfd_vma addend_abs = abs (value);
19491           /* Check that the absolute value of the addend can be
19492              expressed as an 8-bit constant plus a rotation.  */
19493           encoded_addend = encode_arm_immediate (addend_abs);
19494           if (encoded_addend == (unsigned int) FAIL)
19495             as_bad_where (fixP->fx_file, fixP->fx_line,
19496                           _("the offset 0x%08lX is not representable"),
19497                           (unsigned long) addend_abs);
19499           /* Extract the instruction.  */
19500           insn = md_chars_to_number (buf, INSN_SIZE);
19502           /* If the addend is positive, use an ADD instruction.
19503              Otherwise use a SUB.  Take care not to destroy the S bit.  */
19504           insn &= 0xff1fffff;
19510           /* Place the encoded addend into the first 12 bits of the
19512           insn &= 0xfffff000;
19513           insn |= encoded_addend;
19515           /* Update the instruction.  */
19516           md_number_to_chars (buf, insn, INSN_SIZE);
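
/* Illustrative sketch, not part of the original sources: the test made
   by encode_arm_immediate above succeeds exactly when the constant can
   be produced by rotating an 8-bit value right by an even amount, the
   only form an ARM data-processing immediate can take.  Helper name
   invented.  */
static int
arm_immediate_ok_sketch (unsigned int v)
{
  int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate V left by ROT; if the result fits in 8 bits, V is that
         8-bit value rotated right by ROT.  */
      unsigned int r = (v << rot) | (rot ? v >> (32 - rot) : 0);
      if (r <= 0xff)
        return 1;
    }
  return 0;
}
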
19520     case BFD_RELOC_ARM_LDR_PC_G0:
19521     case BFD_RELOC_ARM_LDR_PC_G1:
19522     case BFD_RELOC_ARM_LDR_PC_G2:
19523     case BFD_RELOC_ARM_LDR_SB_G0:
19524     case BFD_RELOC_ARM_LDR_SB_G1:
19525     case BFD_RELOC_ARM_LDR_SB_G2:
19526       assert (!fixP->fx_done);
19527       if (!seg->use_rela_p)
19530           bfd_vma addend_abs = abs (value);
19532           /* Check that the absolute value of the addend can be
19533              encoded in 12 bits.  */
19534           if (addend_abs >= 0x1000)
19535             as_bad_where (fixP->fx_file, fixP->fx_line,
19536                           _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
19537                           (unsigned long) addend_abs);
19539           /* Extract the instruction.  */
19540           insn = md_chars_to_number (buf, INSN_SIZE);
19542           /* If the addend is negative, clear bit 23 of the instruction.
19543              Otherwise set it.  */
19545             insn &= ~(1 << 23);
19549           /* Place the absolute value of the addend into the first 12 bits
19550              of the instruction.  */
19551           insn &= 0xfffff000;
19552           insn |= addend_abs;
19554           /* Update the instruction.  */
19555           md_number_to_chars (buf, insn, INSN_SIZE);
19559     case BFD_RELOC_ARM_LDRS_PC_G0:
19560     case BFD_RELOC_ARM_LDRS_PC_G1:
19561     case BFD_RELOC_ARM_LDRS_PC_G2:
19562     case BFD_RELOC_ARM_LDRS_SB_G0:
19563     case BFD_RELOC_ARM_LDRS_SB_G1:
19564     case BFD_RELOC_ARM_LDRS_SB_G2:
19565       assert (!fixP->fx_done);
19566       if (!seg->use_rela_p)
19569           bfd_vma addend_abs = abs (value);
19571           /* Check that the absolute value of the addend can be
19572              encoded in 8 bits.  */
19573           if (addend_abs >= 0x100)
19574             as_bad_where (fixP->fx_file, fixP->fx_line,
19575                           _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
19576                           (unsigned long) addend_abs);
19578           /* Extract the instruction.  */
19579           insn = md_chars_to_number (buf, INSN_SIZE);
19581           /* If the addend is negative, clear bit 23 of the instruction.
19582              Otherwise set it.  */
19584             insn &= ~(1 << 23);
19588           /* Place the first four bits of the absolute value of the addend
19589              into the first 4 bits of the instruction, and the remaining
19590              four into bits 8 .. 11.  */
19591           insn &= 0xfffff0f0;
19592           insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
19594           /* Update the instruction.  */
19595           md_number_to_chars (buf, insn, INSN_SIZE);
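
/* Illustrative sketch, not part of the original sources: the halfword
   and signed-byte load/store addressing mode patched above splits its
   8-bit immediate into two nibbles, the low one in bits 0..3 and the
   high one in bits 8..11.  Helper name invented.  */
static unsigned int
ldrs_imm8_pack_sketch (unsigned int imm8)
{
  return (imm8 & 0x0f) | ((imm8 & 0xf0) << 4);
}
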
19599     case BFD_RELOC_ARM_LDC_PC_G0:
19600     case BFD_RELOC_ARM_LDC_PC_G1:
19601     case BFD_RELOC_ARM_LDC_PC_G2:
19602     case BFD_RELOC_ARM_LDC_SB_G0:
19603     case BFD_RELOC_ARM_LDC_SB_G1:
19604     case BFD_RELOC_ARM_LDC_SB_G2:
19605       assert (!fixP->fx_done);
19606       if (!seg->use_rela_p)
19609           bfd_vma addend_abs = abs (value);
19611           /* Check that the absolute value of the addend is a multiple of
19612              four and, when divided by four, fits in 8 bits.  */
19613           if (addend_abs & 0x3)
19614             as_bad_where (fixP->fx_file, fixP->fx_line,
19615                           _("bad offset 0x%08lX (must be word-aligned)"),
19616                           (unsigned long) addend_abs);
19618           if ((addend_abs >> 2) > 0xff)
19619             as_bad_where (fixP->fx_file, fixP->fx_line,
19620                           _("bad offset 0x%08lX (must be an 8-bit number of words)"),
19621                           (unsigned long) addend_abs);
19623           /* Extract the instruction.  */
19624           insn = md_chars_to_number (buf, INSN_SIZE);
19626           /* If the addend is negative, clear bit 23 of the instruction.
19627              Otherwise set it.  */
19629             insn &= ~(1 << 23);
19633           /* Place the addend (divided by four) into the first eight
19634              bits of the instruction.  */
19635           insn &= 0xfffffff0;
19636           insn |= addend_abs >> 2;
19638           /* Update the instruction.  */
19639           md_number_to_chars (buf, insn, INSN_SIZE);
19643     case BFD_RELOC_ARM_V4BX:
19644       /* This will need to go in the object file.  */
19648     case BFD_RELOC_UNUSED:
19650       as_bad_where (fixP->fx_file, fixP->fx_line,
19651                     _("bad relocation fixup type (%d)"), fixP->fx_r_type);
19655 /* Translate internal representation of relocation info to BFD target
19659 tc_gen_reloc (asection *section, fixS *fixp)
19662   bfd_reloc_code_real_type code;
19664   reloc = xmalloc (sizeof (arelent));
19666   reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
19667   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
19668   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
19670   if (fixp->fx_pcrel)
19672       if (section->use_rela_p)
19673         fixp->fx_offset -= md_pcrel_from_section (fixp, section);
19675         fixp->fx_offset = reloc->address;
19677   reloc->addend = fixp->fx_offset;
19679   switch (fixp->fx_r_type)
19682       if (fixp->fx_pcrel)
19684           code = BFD_RELOC_8_PCREL;
19689       if (fixp->fx_pcrel)
19691           code = BFD_RELOC_16_PCREL;
19696       if (fixp->fx_pcrel)
19698           code = BFD_RELOC_32_PCREL;
19702     case BFD_RELOC_ARM_MOVW:
19703       if (fixp->fx_pcrel)
19705           code = BFD_RELOC_ARM_MOVW_PCREL;
19709     case BFD_RELOC_ARM_MOVT:
19710       if (fixp->fx_pcrel)
19712           code = BFD_RELOC_ARM_MOVT_PCREL;
19716     case BFD_RELOC_ARM_THUMB_MOVW:
19717       if (fixp->fx_pcrel)
19719           code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
19723     case BFD_RELOC_ARM_THUMB_MOVT:
19724       if (fixp->fx_pcrel)
19726           code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
19730     case BFD_RELOC_NONE:
19731     case BFD_RELOC_ARM_PCREL_BRANCH:
19732     case BFD_RELOC_ARM_PCREL_BLX:
19733     case BFD_RELOC_RVA:
19734     case BFD_RELOC_THUMB_PCREL_BRANCH7:
19735     case BFD_RELOC_THUMB_PCREL_BRANCH9:
19736     case BFD_RELOC_THUMB_PCREL_BRANCH12:
19737     case BFD_RELOC_THUMB_PCREL_BRANCH20:
19738     case BFD_RELOC_THUMB_PCREL_BRANCH23:
19739     case BFD_RELOC_THUMB_PCREL_BRANCH25:
19740     case BFD_RELOC_THUMB_PCREL_BLX:
19741     case BFD_RELOC_VTABLE_ENTRY:
19742     case BFD_RELOC_VTABLE_INHERIT:
19744     case BFD_RELOC_32_SECREL:
19746       code = fixp->fx_r_type;
19749     case BFD_RELOC_ARM_LITERAL:
19750     case BFD_RELOC_ARM_HWLITERAL:
19751       /* If this is called then a literal has
19752          been referenced across a section boundary.  */
19753       as_bad_where (fixp->fx_file, fixp->fx_line,
19754                     _("literal referenced across section boundary"));
19758     case BFD_RELOC_ARM_GOT32:
19759     case BFD_RELOC_ARM_GOTOFF:
19760     case BFD_RELOC_ARM_PLT32:
19761     case BFD_RELOC_ARM_TARGET1:
19762     case BFD_RELOC_ARM_ROSEGREL32:
19763     case BFD_RELOC_ARM_SBREL32:
19764     case BFD_RELOC_ARM_PREL31:
19765     case BFD_RELOC_ARM_TARGET2:
19766     case BFD_RELOC_ARM_TLS_LE32:
19767     case BFD_RELOC_ARM_TLS_LDO32:
19768     case BFD_RELOC_ARM_PCREL_CALL:
19769     case BFD_RELOC_ARM_PCREL_JUMP:
19770     case BFD_RELOC_ARM_ALU_PC_G0_NC:
19771     case BFD_RELOC_ARM_ALU_PC_G0:
19772     case BFD_RELOC_ARM_ALU_PC_G1_NC:
19773     case BFD_RELOC_ARM_ALU_PC_G1:
19774     case BFD_RELOC_ARM_ALU_PC_G2:
19775     case BFD_RELOC_ARM_LDR_PC_G0:
19776     case BFD_RELOC_ARM_LDR_PC_G1:
19777     case BFD_RELOC_ARM_LDR_PC_G2:
19778     case BFD_RELOC_ARM_LDRS_PC_G0:
19779     case BFD_RELOC_ARM_LDRS_PC_G1:
19780     case BFD_RELOC_ARM_LDRS_PC_G2:
19781     case BFD_RELOC_ARM_LDC_PC_G0:
19782     case BFD_RELOC_ARM_LDC_PC_G1:
19783     case BFD_RELOC_ARM_LDC_PC_G2:
19784     case BFD_RELOC_ARM_ALU_SB_G0_NC:
19785     case BFD_RELOC_ARM_ALU_SB_G0:
19786     case BFD_RELOC_ARM_ALU_SB_G1_NC:
19787     case BFD_RELOC_ARM_ALU_SB_G1:
19788     case BFD_RELOC_ARM_ALU_SB_G2:
19789     case BFD_RELOC_ARM_LDR_SB_G0:
19790     case BFD_RELOC_ARM_LDR_SB_G1:
19791     case BFD_RELOC_ARM_LDR_SB_G2:
19792     case BFD_RELOC_ARM_LDRS_SB_G0:
19793     case BFD_RELOC_ARM_LDRS_SB_G1:
19794     case BFD_RELOC_ARM_LDRS_SB_G2:
19795     case BFD_RELOC_ARM_LDC_SB_G0:
19796     case BFD_RELOC_ARM_LDC_SB_G1:
19797     case BFD_RELOC_ARM_LDC_SB_G2:
19798     case BFD_RELOC_ARM_V4BX:
19799       code = fixp->fx_r_type;
19802     case BFD_RELOC_ARM_TLS_GD32:
19803     case BFD_RELOC_ARM_TLS_IE32:
19804     case BFD_RELOC_ARM_TLS_LDM32:
19805       /* BFD will include the symbol's address in the addend.
19806          But we don't want that, so subtract it out again here.  */
19807       if (!S_IS_COMMON (fixp->fx_addsy))
19808         reloc->addend -= (*reloc->sym_ptr_ptr)->value;
19809       code = fixp->fx_r_type;
19813     case BFD_RELOC_ARM_IMMEDIATE:
19814       as_bad_where (fixp->fx_file, fixp->fx_line,
19815                     _("internal relocation (type: IMMEDIATE) not fixed up"));
19818     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
19819       as_bad_where (fixp->fx_file, fixp->fx_line,
19820                     _("ADRL used for a symbol not defined in the same file"));
19823     case BFD_RELOC_ARM_OFFSET_IMM:
19824       if (section->use_rela_p)
19826           code = fixp->fx_r_type;
19830       if (fixp->fx_addsy != NULL
19831           && !S_IS_DEFINED (fixp->fx_addsy)
19832           && S_IS_LOCAL (fixp->fx_addsy))
19834           as_bad_where (fixp->fx_file, fixp->fx_line,
19835                         _("undefined local label `%s'"),
19836                         S_GET_NAME (fixp->fx_addsy));
19840       as_bad_where (fixp->fx_file, fixp->fx_line,
19841                     _("internal_relocation (type: OFFSET_IMM) not fixed up"));
19848       switch (fixp->fx_r_type)
19850         case BFD_RELOC_NONE:               type = "NONE";           break;
19851         case BFD_RELOC_ARM_OFFSET_IMM8:    type = "OFFSET_IMM8";    break;
19852         case BFD_RELOC_ARM_SHIFT_IMM:      type = "SHIFT_IMM";      break;
19853         case BFD_RELOC_ARM_SMC:            type = "SMC";            break;
19854         case BFD_RELOC_ARM_SWI:            type = "SWI";            break;
19855         case BFD_RELOC_ARM_MULTI:          type = "MULTI";          break;
19856         case BFD_RELOC_ARM_CP_OFF_IMM:     type = "CP_OFF_IMM";     break;
19857         case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
19858         case BFD_RELOC_ARM_THUMB_ADD:      type = "THUMB_ADD";      break;
19859         case BFD_RELOC_ARM_THUMB_SHIFT:    type = "THUMB_SHIFT";    break;
19860         case BFD_RELOC_ARM_THUMB_IMM:      type = "THUMB_IMM";      break;
19861         case BFD_RELOC_ARM_THUMB_OFFSET:   type = "THUMB_OFFSET";   break;
19862         default:                           type = _("<unknown>");   break;
19864       as_bad_where (fixp->fx_file, fixp->fx_line,
19865                     _("cannot represent %s relocation in this object file format"),
19872   if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
19874       && fixp->fx_addsy == GOT_symbol)
19876       code = BFD_RELOC_ARM_GOTPC;
19877       reloc->addend = fixp->fx_offset = reloc->address;
19881   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
19883   if (reloc->howto == NULL)
19885       as_bad_where (fixp->fx_file, fixp->fx_line,
19886                     _("cannot represent %s relocation in this object file format"),
19887                     bfd_get_reloc_code_name (code));
19891   /* HACK: Since arm ELF uses Rel instead of Rela, encode the
19892      vtable entry to be used in the relocation's section offset.  */
19893   if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
19894     reloc->address = fixp->fx_offset;
19899 /* This fix_new is called by cons via TC_CONS_FIX_NEW.  */
19902 cons_fix_new_arm (fragS * frag,
19907   bfd_reloc_code_real_type type;
19911      FIXME: @@ Should look at CPU word size.  */
19915       type = BFD_RELOC_8;
19918       type = BFD_RELOC_16;
19922       type = BFD_RELOC_32;
19925       type = BFD_RELOC_64;
19930   if (exp->X_op == O_secrel)
19932       exp->X_op = O_symbol;
19933       type = BFD_RELOC_32_SECREL;
19937   fix_new_exp (frag, where, (int) size, exp, pcrel, type);
19940 #if defined (OBJ_COFF)
19942 arm_validate_fix (fixS * fixP)
19944   /* If the destination of the branch is a defined symbol which does not have
19945      the THUMB_FUNC attribute, then we must be calling a function which has
19946      the (interfacearm) attribute.  We look for the Thumb entry point to that
19947      function and change the branch to refer to that function instead.  */
19948   if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
19949       && fixP->fx_addsy != NULL
19950       && S_IS_DEFINED (fixP->fx_addsy)
19951       && ! THUMB_IS_FUNC (fixP->fx_addsy))
19953       fixP->fx_addsy = find_real_start (fixP->fx_addsy);
19959 arm_force_relocation (struct fix * fixp)
19961 #if defined (OBJ_COFF) && defined (TE_PE)
19962   if (fixp->fx_r_type == BFD_RELOC_RVA)
19966   /* Resolve these relocations even if the symbol is extern or weak.  */
19967   if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
19968       || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
19969       || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
19970       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
19971       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
19972       || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
19973       || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
19976   /* Always leave these relocations for the linker.  */
19977   if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
19978        && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
19979       || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
19982   /* Always generate relocations against function symbols.  */
19983   if (fixp->fx_r_type == BFD_RELOC_32
19985       && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
19988   return generic_force_reloc (fixp);
19991 #if defined (OBJ_ELF) || defined (OBJ_COFF)
19992 /* Relocations against function names must be left unadjusted,
19993    so that the linker can use this information to generate interworking
19994    stubs.  The MIPS version of this function
19995    also prevents relocations that are mips-16 specific, but I do not
19996    know why it does this.
19999    There is one other problem that ought to be addressed here, but
20000    which currently is not: Taking the address of a label (rather
20001    than a function) and then later jumping to that address.  Such
20002    addresses also ought to have their bottom bit set (assuming that
20003    they reside in Thumb code), but at the moment they will not.  */
20006 arm_fix_adjustable (fixS * fixP)
20008   if (fixP->fx_addsy == NULL)
20011   /* Preserve relocations against symbols with function type.  */
20012   if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
20015   if (THUMB_IS_FUNC (fixP->fx_addsy)
20016       && fixP->fx_subsy == NULL)
20019   /* We need the symbol name for the VTABLE entries.  */
20020   if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
20021        || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
20024   /* Don't allow symbols to be discarded on GOT related relocs.  */
20025   if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
20026       || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
20027       || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
20028       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
20029       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
20030       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
20031       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
20032       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
20033       || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
20036   /* Similarly for group relocations.  */
20037   if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
20038        && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
20039       || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
20042   /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
20043   if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
20044       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
20045       || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
20046       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
20047       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
20048       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
20049       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
20050       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
20055 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
20060 elf32_arm_target_format (void)
20063 return (target_big_endian
20064 ? "elf32-bigarm-symbian"
20065 : "elf32-littlearm-symbian");
20066 #elif defined (TE_VXWORKS)
20067 return (target_big_endian
20068 ? "elf32-bigarm-vxworks"
20069 : "elf32-littlearm-vxworks");
20071 if (target_big_endian
)
20072 return "elf32-bigarm";
20074 return "elf32-littlearm";
20079 armelf_frob_symbol (symbolS
* symp
,
20082 elf_frob_symbol (symp
, puntp
);
20086 /* MD interface: Finalization. */
20088 /* A good place to do this, although this was probably not intended
20089 for this kind of use. We need to dump the literal pool before
20090 references are made to a null symbol pointer. */
20095 literal_pool
* pool
;
20097 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
20099 /* Put it at the end of the relevant section. */
20100 subseg_set (pool
->section
, pool
->sub_section
);
20102 arm_elf_change_section ();
20108 /* Adjust the symbol table. This marks Thumb symbols as distinct from
20112 arm_adjust_symtab (void)
20117 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
20119 if (ARM_IS_THUMB (sym
))
20121 if (THUMB_IS_FUNC (sym
))
20123 /* Mark the symbol as a Thumb function. */
20124 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
20125 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
20126 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
20128 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
20129 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
20131 as_bad (_("%s: unexpected function type: %d"),
20132 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
20134 else switch (S_GET_STORAGE_CLASS (sym
))
20137 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
20140 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
20143 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
20151 if (ARM_IS_INTERWORK (sym
))
20152 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
20159 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
20161 if (ARM_IS_THUMB (sym
))
20163 elf_symbol_type
* elf_sym
;
20165 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
20166 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
20168 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
20169 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
20171 /* If it's a .thumb_func, declare it as so,
20172 otherwise tag label as .code 16. */
20173 if (THUMB_IS_FUNC (sym
))
20174 elf_sym
->internal_elf_sym
.st_info
=
20175 ELF_ST_INFO (bind
, STT_ARM_TFUNC
);
20176 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
20177 elf_sym
->internal_elf_sym
.st_info
=
20178 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
20185 /* MD interface: Initialization. */
20188 set_constant_flonums (void)
20192 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
20193 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
20197 /* Auto-select Thumb mode if it's the only available instruction set for the
20198 given architecture. */
20201 autoselect_thumb_from_cpu_variant (void)
20203 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
20204 opcode_select (16);
20213 if ( (arm_ops_hsh
= hash_new ()) == NULL
20214 || (arm_cond_hsh
= hash_new ()) == NULL
20215 || (arm_shift_hsh
= hash_new ()) == NULL
20216 || (arm_psr_hsh
= hash_new ()) == NULL
20217 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
20218 || (arm_reg_hsh
= hash_new ()) == NULL
20219 || (arm_reloc_hsh
= hash_new ()) == NULL
20220 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
20221 as_fatal (_("virtual memory exhausted"));
20223 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
20224 hash_insert (arm_ops_hsh
, insns
[i
].template, (void *) (insns
+ i
));
20225 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
20226 hash_insert (arm_cond_hsh
, conds
[i
].template, (void *) (conds
+ i
));
20227 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
20228 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
20229 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
20230 hash_insert (arm_psr_hsh
, psrs
[i
].template, (void *) (psrs
+ i
));
20231 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
20232 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template, (void *) (v7m_psrs
+ i
));
20233 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
20234 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
20236 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
20238 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template,
20239 (void *) (barrier_opt_names
+ i
));
20241 for (i
= 0; i
< sizeof (reloc_names
) / sizeof (struct reloc_entry
); i
++)
20242 hash_insert (arm_reloc_hsh
, reloc_names
[i
].name
, (void *) (reloc_names
+ i
));
20245 set_constant_flonums ();
20247 /* Set the cpu variant based on the command-line options. We prefer
20248 -mcpu= over -march= if both are set (as for GCC); and we prefer
20249 -mfpu= over any other way of setting the floating point unit.
20250 Use of legacy options with new options are faulted. */
20253 if (mcpu_cpu_opt
|| march_cpu_opt
)
20254 as_bad (_("use of old and new-style options to set CPU type"));
20256 mcpu_cpu_opt
= legacy_cpu
;
20258 else if (!mcpu_cpu_opt
)
20259 mcpu_cpu_opt
= march_cpu_opt
;
20264 as_bad (_("use of old and new-style options to set FPU type"));
20266 mfpu_opt
= legacy_fpu
;
20268 else if (!mfpu_opt
)
20270 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
20271 /* Some environments specify a default FPU. If they don't, infer it
20272 from the processor. */
20274 mfpu_opt
= mcpu_fpu_opt
;
20276 mfpu_opt
= march_fpu_opt
;
20278 mfpu_opt
= &fpu_default
;
20284 if (mcpu_cpu_opt
!= NULL
)
20285 mfpu_opt
= &fpu_default
;
20286 else if (mcpu_fpu_opt
!= NULL
&& ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt
, arm_ext_v5
))
20287 mfpu_opt
= &fpu_arch_vfp_v2
;
20289 mfpu_opt
= &fpu_arch_fpa
;
20295 mcpu_cpu_opt
= &cpu_default
;
20296 selected_cpu
= cpu_default
;
20300 selected_cpu
= *mcpu_cpu_opt
;
20302 mcpu_cpu_opt
= &arm_arch_any
;
20305 ARM_MERGE_FEATURE_SETS (cpu_variant
, *mcpu_cpu_opt
, *mfpu_opt
);
20307 autoselect_thumb_from_cpu_variant ();
20309 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
20311 #if defined OBJ_COFF || defined OBJ_ELF
20313 unsigned int flags
= 0;
20315 #if defined OBJ_ELF
20316 flags
= meabi_flags
;
20318 switch (meabi_flags
)
20320 case EF_ARM_EABI_UNKNOWN
:
20322 /* Set the flags in the private structure. */
20323 if (uses_apcs_26
) flags
|= F_APCS26
;
20324 if (support_interwork
) flags
|= F_INTERWORK
;
20325 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
20326 if (pic_code
) flags
|= F_PIC
;
20327 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
20328 flags
|= F_SOFT_FLOAT
;
20330 switch (mfloat_abi_opt
)
20332 case ARM_FLOAT_ABI_SOFT
:
20333 case ARM_FLOAT_ABI_SOFTFP
:
20334 flags
|= F_SOFT_FLOAT
;
20337 case ARM_FLOAT_ABI_HARD
:
20338 if (flags
& F_SOFT_FLOAT
)
20339 as_bad (_("hard-float conflicts with specified fpu"));
20343 /* Using pure-endian doubles (even if soft-float). */
20344 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
20345 flags
|= F_VFP_FLOAT
;
20347 #if defined OBJ_ELF
20348 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
20349 flags
|= EF_ARM_MAVERICK_FLOAT
;
20352 case EF_ARM_EABI_VER4
:
20353 case EF_ARM_EABI_VER5
:
20354 /* No additional flags to set. */
20361 bfd_set_private_flags (stdoutput
, flags
);
20363 /* We have run out of flags in the COFF header to encode the
20364 status of ATPCS support, so instead we create a dummy,
20365 empty, debug section called .arm.atpcs. */
20370 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
20374 bfd_set_section_flags
20375 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
20376 bfd_set_section_size (stdoutput
, sec
, 0);
20377 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
20383 /* Record the CPU type as well. */
20384 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
20385 mach
= bfd_mach_arm_iWMMXt2
;
20386 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
20387 mach
= bfd_mach_arm_iWMMXt
;
20388 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
20389 mach
= bfd_mach_arm_XScale
;
20390 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
20391 mach
= bfd_mach_arm_ep9312
;
20392 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
20393 mach
= bfd_mach_arm_5TE
;
20394 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
20396 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
20397 mach
= bfd_mach_arm_5T
;
20399 mach
= bfd_mach_arm_5
;
20401 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
20403 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
20404 mach
= bfd_mach_arm_4T
;
20406 mach
= bfd_mach_arm_4
;
20408 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
20409 mach
= bfd_mach_arm_3M
;
20410 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
20411 mach
= bfd_mach_arm_3
;
20412 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
20413 mach
= bfd_mach_arm_2a
;
20414 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
20415 mach
= bfd_mach_arm_2
;
20417 mach
= bfd_mach_arm_unknown
;
20419 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
20422 /* Command line processing. */
20425 Invocation line includes a switch not recognized by the base assembler.
20426 See if it's a processor-specific option.
20428 This routine is somewhat complicated by the need for backwards
20429 compatibility (since older releases of gcc can't be changed).
20430 The new options try to make the interface as compatible as
20433 New options (supported) are:
20435 -mcpu=<cpu name> Assemble for selected processor
20436 -march=<architecture name> Assemble for selected architecture
20437 -mfpu=<fpu architecture> Assemble for selected FPU.
20438 -EB/-mbig-endian Big-endian
20439 -EL/-mlittle-endian Little-endian
20440 -k Generate PIC code
20441 -mthumb Start in Thumb mode
20442 -mthumb-interwork Code supports ARM/Thumb interworking
20444 -m[no-]warn-deprecated Warn about deprecated features
20446 For now we will also provide support for:
20448 -mapcs-32 32-bit Program counter
20449 -mapcs-26 26-bit Program counter
20450 -mapcs-float Floats passed in FP registers
20451 -mapcs-reentrant Reentrant code
20453 (sometime these will probably be replaced with -mapcs=<list of options>
20454 and -matpcs=<list of options>)
20456 The remaining options are only supported for backwards compatibility.
20457 Cpu variants, the arm part is optional:
20458 -m[arm]1 Currently not supported.
20459 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
20460 -m[arm]3 Arm 3 processor
20461 -m[arm]6[xx], Arm 6 processors
20462 -m[arm]7[xx][t][[d]m] Arm 7 processors
20463 -m[arm]8[10] Arm 8 processors
20464 -m[arm]9[20][tdmi] Arm 9 processors
20465 -mstrongarm[110[0]] StrongARM processors
20466 -mxscale XScale processors
20467 -m[arm]v[2345[t[e]]] Arm architectures
20468 -mall All (except the ARM1)
20470 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
20471 -mfpe-old (No float load/store multiples)
20472 -mvfpxd VFP Single precision
20474 -mno-fpu Disable all floating point instructions
20476 The following CPU names are recognized:
20477 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
20478 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
20479 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
20480 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
20481 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
20482 arm10t arm10e, arm1020t, arm1020e, arm10200e,
20483 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
20487 const char * md_shortopts
= "m:k";
20489 #ifdef ARM_BI_ENDIAN
20490 #define OPTION_EB (OPTION_MD_BASE + 0)
20491 #define OPTION_EL (OPTION_MD_BASE + 1)
20493 #if TARGET_BYTES_BIG_ENDIAN
20494 #define OPTION_EB (OPTION_MD_BASE + 0)
20496 #define OPTION_EL (OPTION_MD_BASE + 1)
20499 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
20501 struct option md_longopts
[] =
20504 {"EB", no_argument
, NULL
, OPTION_EB
},
20507 {"EL", no_argument
, NULL
, OPTION_EL
},
20509 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
20510 {NULL
, no_argument
, NULL
, 0}
20513 size_t md_longopts_size
= sizeof (md_longopts
);
20515 struct arm_option_table
20517 char *option
; /* Option name to match. */
20518 char *help
; /* Help information. */
20519 int *var
; /* Variable to change. */
20520 int value
; /* What to change it to. */
20521 char *deprecated
; /* If non-null, print this message. */
20524 struct arm_option_table arm_opts
[] =
20526 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
20527 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
20528 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
20529 &support_interwork
, 1, NULL
},
20530 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
20531 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
20532 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
20534 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
20535 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
20536 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
20537 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
20540 /* These are recognized by the assembler, but have no effect on code.  */
20541 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
20542 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
20544 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
20545 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
20546 &warn_on_deprecated
, 0, NULL
},
20547 {NULL
, NULL
, NULL
, 0, NULL
}
20550 struct arm_legacy_option_table
20552 char *option
; /* Option name to match. */
20553 const arm_feature_set
**var
; /* Variable to change. */
20554 const arm_feature_set value
; /* What to change it to. */
20555 char *deprecated
; /* If non-null, print this message. */
20558 const struct arm_legacy_option_table arm_legacy_opts
[] =
20560 /* DON'T add any new processors to this list -- we want the whole list
20561 to go away... Add them to the processors table instead. */
20562 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
20563 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
20564 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
20565 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
20566 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
20567 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
20568 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
20569 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
20570 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
20571 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
20572 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
20573 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
20574 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
20575 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
20576 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
20577 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
20578 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
20579 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
20580 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
20581 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
20582 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
20583 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
20584 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
20585 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
20586 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
20587 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
20588 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
20589 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
20590 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
20591 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
20592 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
20593 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
20594 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
20595 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
20596 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
20597 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
20598 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
20599 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
20600 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
20601 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
20602 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
20603 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
20604 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
20605 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
20606 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
20607 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
20608 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
20609 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
20610 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
20611 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
20612 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
20613 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
20614 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
20615 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
20616 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
20617 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
20618 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
20619 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
20620 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
20621 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
20622 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
20623 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
20624 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
20625 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
20626 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
20627 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
20628 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
20629 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
20630 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
20631 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
20632 N_("use -mcpu=strongarm110")},
20633 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
20634 N_("use -mcpu=strongarm1100")},
20635 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
20636 N_("use -mcpu=strongarm1110")},
20637 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
20638 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
20639 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
20641 /* Architecture variants -- don't add any more to this list either. */
20642 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
20643 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
20644 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
20645 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
20646 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
20647 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
20648 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
20649 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
20650 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
20651 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
20652 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
20653 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
20654 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
20655 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
20656 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
20657 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
20658 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
20659 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
20661 /* Floating point variants -- don't add any more to this list either. */
20662 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
20663 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
20664 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
20665 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
20666 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
20668 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
20671 struct arm_cpu_option_table
20674 const arm_feature_set value
;
20675 /* For some CPUs we assume an FPU unless the user explicitly sets
20677 const arm_feature_set default_fpu
;
20678 /* The canonical name of the CPU, or NULL to use NAME converted to upper
20680 const char *canonical_name
;
20683 /* This list should, at a minimum, contain all the cpu names
20684 recognized by GCC. */
20685 static const struct arm_cpu_option_table arm_cpus
[] =
20687 {"all", ARM_ANY
, FPU_ARCH_FPA
, NULL
},
20688 {"arm1", ARM_ARCH_V1
, FPU_ARCH_FPA
, NULL
},
20689 {"arm2", ARM_ARCH_V2
, FPU_ARCH_FPA
, NULL
},
20690 {"arm250", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
20691 {"arm3", ARM_ARCH_V2S
, FPU_ARCH_FPA
, NULL
},
20692 {"arm6", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20693 {"arm60", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20694 {"arm600", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20695 {"arm610", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20696 {"arm620", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20697 {"arm7", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20698 {"arm7m", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
20699 {"arm7d", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20700 {"arm7dm", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
20701 {"arm7di", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20702 {"arm7dmi", ARM_ARCH_V3M
, FPU_ARCH_FPA
, NULL
},
20703 {"arm70", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20704 {"arm700", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20705 {"arm700i", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20706 {"arm710", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20707 {"arm710t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20708 {"arm720", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20709 {"arm720t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20710 {"arm740t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20711 {"arm710c", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20712 {"arm7100", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20713 {"arm7500", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20714 {"arm7500fe", ARM_ARCH_V3
, FPU_ARCH_FPA
, NULL
},
20715 {"arm7t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20716 {"arm7tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20717 {"arm7tdmi-s", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20718 {"arm8", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20719 {"arm810", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20720 {"strongarm", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20721 {"strongarm1", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20722 {"strongarm110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20723 {"strongarm1100", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20724 {"strongarm1110", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20725 {"arm9", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20726 {"arm920", ARM_ARCH_V4T
, FPU_ARCH_FPA
, "ARM920T"},
20727 {"arm920t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20728 {"arm922t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20729 {"arm940t", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20730 {"arm9tdmi", ARM_ARCH_V4T
, FPU_ARCH_FPA
, NULL
},
20731 {"fa526", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20732 {"fa626", ARM_ARCH_V4
, FPU_ARCH_FPA
, NULL
},
20733 /* For V5 or later processors we default to using VFP; but the user
20734 should really set the FPU type explicitly. */
20735 {"arm9e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
20736 {"arm9e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20737 {"arm926ej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
20738 {"arm926ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM926EJ-S"},
20739 {"arm926ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
20740 {"arm946e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
20741 {"arm946e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM946E-S"},
20742 {"arm946e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20743 {"arm966e-r0", ARM_ARCH_V5TExP
, FPU_ARCH_VFP_V2
, NULL
},
20744 {"arm966e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM966E-S"},
20745 {"arm966e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20746 {"arm968e-s", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20747 {"arm10t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
20748 {"arm10tdmi", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
20749 {"arm10e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20750 {"arm1020", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, "ARM1020E"},
20751 {"arm1020t", ARM_ARCH_V5T
, FPU_ARCH_VFP_V1
, NULL
},
20752 {"arm1020e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20753 {"arm1022e", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20754 {"arm1026ejs", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, "ARM1026EJ-S"},
20755 {"arm1026ej-s", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP_V2
, NULL
},
20756 {"fa626te", ARM_ARCH_V5TE
, FPU_NONE
, NULL
},
20757 {"fa726te", ARM_ARCH_V5TE
, FPU_ARCH_VFP_V2
, NULL
},
20758 {"arm1136js", ARM_ARCH_V6
, FPU_NONE
, "ARM1136J-S"},
20759 {"arm1136j-s", ARM_ARCH_V6
, FPU_NONE
, NULL
},
20760 {"arm1136jfs", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, "ARM1136JF-S"},
20761 {"arm1136jf-s", ARM_ARCH_V6
, FPU_ARCH_VFP_V2
, NULL
},
20762 {"mpcore", ARM_ARCH_V6K
, FPU_ARCH_VFP_V2
, NULL
},
20763 {"mpcorenovfp", ARM_ARCH_V6K
, FPU_NONE
, NULL
},
20764 {"arm1156t2-s", ARM_ARCH_V6T2
, FPU_NONE
, NULL
},
20765 {"arm1156t2f-s", ARM_ARCH_V6T2
, FPU_ARCH_VFP_V2
, NULL
},
20766 {"arm1176jz-s", ARM_ARCH_V6ZK
, FPU_NONE
, NULL
},
20767 {"arm1176jzf-s", ARM_ARCH_V6ZK
, FPU_ARCH_VFP_V2
, NULL
},
20768 {"cortex-a8", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
20769 | FPU_NEON_EXT_V1
),
20771 {"cortex-a9", ARM_ARCH_V7A
, ARM_FEATURE(0, FPU_VFP_V3
20772 | FPU_NEON_EXT_V1
),
20774 {"cortex-r4", ARM_ARCH_V7R
, FPU_NONE
, NULL
},
20775 {"cortex-m3", ARM_ARCH_V7M
, FPU_NONE
, NULL
},
20776 {"cortex-m1", ARM_ARCH_V6M
, FPU_NONE
, NULL
},
20777 {"cortex-m0", ARM_ARCH_V6M
, FPU_NONE
, NULL
},
20778 /* ??? XSCALE is really an architecture. */
20779 {"xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
20780 /* ??? iwmmxt is not a processor. */
20781 {"iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP_V2
, NULL
},
20782 {"iwmmxt2", ARM_ARCH_IWMMXT2
,FPU_ARCH_VFP_V2
, NULL
},
20783 {"i80200", ARM_ARCH_XSCALE
, FPU_ARCH_VFP_V2
, NULL
},
20785 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
), FPU_ARCH_MAVERICK
, "ARM920T"},
20786 {NULL
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
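
/* Illustrative use of the table above: "-mcpu=arm926ej" selects ARM_ARCH_V5TEJ
   with FPU_ARCH_VFP_V2 as the default FPU and reports the canonical name
   "ARM926EJ-S"; entries whose canonical name is NULL simply report the
   upper-cased table name instead.  */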
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",        ARM_ANY,          FPU_ARCH_FPA},
  {"armv1",      ARM_ARCH_V1,      FPU_ARCH_FPA},
  {"armv2",      ARM_ARCH_V2,      FPU_ARCH_FPA},
  {"armv2a",     ARM_ARCH_V2S,     FPU_ARCH_FPA},
  {"armv2s",     ARM_ARCH_V2S,     FPU_ARCH_FPA},
  {"armv3",      ARM_ARCH_V3,      FPU_ARCH_FPA},
  {"armv3m",     ARM_ARCH_V3M,     FPU_ARCH_FPA},
  {"armv4",      ARM_ARCH_V4,      FPU_ARCH_FPA},
  {"armv4xm",    ARM_ARCH_V4xM,    FPU_ARCH_FPA},
  {"armv4t",     ARM_ARCH_V4T,     FPU_ARCH_FPA},
  {"armv4txm",   ARM_ARCH_V4TxM,   FPU_ARCH_FPA},
  {"armv5",      ARM_ARCH_V5,      FPU_ARCH_VFP},
  {"armv5t",     ARM_ARCH_V5T,     FPU_ARCH_VFP},
  {"armv5txm",   ARM_ARCH_V5TxM,   FPU_ARCH_VFP},
  {"armv5te",    ARM_ARCH_V5TE,    FPU_ARCH_VFP},
  {"armv5texp",  ARM_ARCH_V5TExP,  FPU_ARCH_VFP},
  {"armv5tej",   ARM_ARCH_V5TEJ,   FPU_ARCH_VFP},
  {"armv6",      ARM_ARCH_V6,      FPU_ARCH_VFP},
  {"armv6j",     ARM_ARCH_V6,      FPU_ARCH_VFP},
  {"armv6k",     ARM_ARCH_V6K,     FPU_ARCH_VFP},
  {"armv6z",     ARM_ARCH_V6Z,     FPU_ARCH_VFP},
  {"armv6zk",    ARM_ARCH_V6ZK,    FPU_ARCH_VFP},
  {"armv6t2",    ARM_ARCH_V6T2,    FPU_ARCH_VFP},
  {"armv6kt2",   ARM_ARCH_V6KT2,   FPU_ARCH_VFP},
  {"armv6zt2",   ARM_ARCH_V6ZT2,   FPU_ARCH_VFP},
  {"armv6zkt2",  ARM_ARCH_V6ZKT2,  FPU_ARCH_VFP},
  {"armv6-m",    ARM_ARCH_V6M,     FPU_ARCH_VFP},
  {"armv7",      ARM_ARCH_V7,      FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",     ARM_ARCH_V7A,     FPU_ARCH_VFP},
  {"armv7r",     ARM_ARCH_V7R,     FPU_ARCH_VFP},
  {"armv7m",     ARM_ARCH_V7M,     FPU_ARCH_VFP},
  {"armv7-a",    ARM_ARCH_V7A,     FPU_ARCH_VFP},
  {"armv7-r",    ARM_ARCH_V7R,     FPU_ARCH_VFP},
  {"armv7-m",    ARM_ARCH_V7M,     FPU_ARCH_VFP},
  {"xscale",     ARM_ARCH_XSCALE,  FPU_ARCH_VFP},
  {"iwmmxt",     ARM_ARCH_IWMMXT,  FPU_ARCH_VFP},
  {"iwmmxt2",    ARM_ARCH_IWMMXT2, FPU_ARCH_VFP},
  {NULL,         ARM_ARCH_NONE,    ARM_ARCH_NONE}
};
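
/* Illustrative use: "-march=armv7-a" selects ARM_ARCH_V7A with a default FPU
   of FPU_ARCH_VFP; an explicit -mfpu= option is typically still needed before
   VFPv3 or Neon instructions are accepted.  */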
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",   ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",     ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",     ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",    ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,         ARM_ARCH_NONE}
};
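
/* Extensions are requested as "+<name>" suffixes to -mcpu= or -march=; e.g.
   (illustratively) "-mcpu=arm926ej-s+iwmmxt" merges ARM_CEXT_IWMMXT into the
   selected feature set via arm_parse_extension below.  */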
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",      FPU_NONE},
  {"fpe",          FPU_ARCH_FPE},
  {"fpe2",         FPU_ARCH_FPE},
  {"fpe3",         FPU_ARCH_FPA},  /* Third release supports LFM/SFM.  */
  {"fpa",          FPU_ARCH_FPA},
  {"fpa10",        FPU_ARCH_FPA},
  {"fpa11",        FPU_ARCH_FPA},
  {"arm7500fe",    FPU_ARCH_FPA},
  {"softvfp",      FPU_ARCH_VFP},
  {"softvfp+vfp",  FPU_ARCH_VFP_V2},
  {"vfp",          FPU_ARCH_VFP_V2},
  {"vfp9",         FPU_ARCH_VFP_V2},
  {"vfp3",         FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",        FPU_ARCH_VFP_V2},
  {"vfp10-r0",     FPU_ARCH_VFP_V1},
  {"vfpxd",        FPU_ARCH_VFP_V1xD},
  {"vfpv2",        FPU_ARCH_VFP_V2},
  {"vfpv3",        FPU_ARCH_VFP_V3},
  {"vfpv3-d16",    FPU_ARCH_VFP_V3D16},
  {"arm1020t",     FPU_ARCH_VFP_V1},
  {"arm1020e",     FPU_ARCH_VFP_V2},
  {"arm1136jfs",   FPU_ARCH_VFP_V2},
  {"arm1136jf-s",  FPU_ARCH_VFP_V2},
  {"maverick",     FPU_ARCH_MAVERICK},
  {"neon",         FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",    FPU_ARCH_NEON_FP16},
  {NULL,           ARM_ARCH_NONE}
};
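
/* Illustrative use of the table above: "-mfpu=neon" selects
   FPU_ARCH_VFP_V3_PLUS_NEON_V1, while "-mfpu=vfpv3-d16" selects the
   16-double-register VFPv3 variant.  */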
struct arm_option_value_table
{
  char *name;
  long value;
};

static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",    ARM_FLOAT_ABI_HARD},
  {"softfp",  ARM_FLOAT_ABI_SOFTFP},
  {"soft",    ARM_FLOAT_ABI_SOFT},
  {NULL,      0}
};

/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",     EF_ARM_EABI_UNKNOWN},
  {"4",       EF_ARM_EABI_VER4},
  {"5",       EF_ARM_EABI_VER5},
  {NULL,      0}
};
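
/* Illustrative use: "-meabi=5" selects EF_ARM_EABI_VER5; an EABI version of
   at least 4 is needed before the default .ARM.attributes contents are
   emitted (see arm_md_end below).  */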
struct arm_long_option_table
{
  char * option;                 /* Substring to match.  */
  char * help;                   /* Help information.  */
  int (* func) (char * subopt);  /* Function to decode sub-option.  */
  char * deprecated;             /* If non-null, print this message.  */
};
static int
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return 0;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	optlen = ext - str;
      else
	optlen = strlen (str);

      if (optlen == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return 0;
	}

      for (opt = arm_extensions; opt->name != NULL; opt++)
	if (strncmp (opt->name, str, optlen) == 0)
	  {
	    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    break;
	  }

      if (opt->name == NULL)
	{
	  as_bad (_("unknown architectural extension `%s'"), str);
	  return 0;
	}

      str = ext;
    }

  return 1;
}
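
/* Example of the syntax handled above (illustrative): for the suffix string
   "+maverick+iwmmxt", each "+"-prefixed name is looked up in arm_extensions
   and its feature bits are merged into the copied feature set; empty or
   unknown extension names are diagnosed with as_bad.  */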
static int
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}
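
/* Illustrative behaviour: "-mcpu=arm920" stores the canonical name "ARM920T"
   in selected_cpu_name, whereas a table entry without a canonical name, such
   as "arm9tdmi", is simply upper-cased to "ARM9TDMI".  */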
static int
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return 0;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return 1;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return 0;
}
static int
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfpu_opt = &opt->value;
	return 1;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return 0;
}
static int
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfloat_abi_opt = opt->value;
	return 1;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return 0;
}
static int
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return 1;
      }
  as_bad (_("unknown EABI `%s'\n"), str);
  return 0;
}
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
  {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
  {NULL, NULL, 0, NULL}
};
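
/* A typical invocation combining these long options might be (illustrative):
       as -march=armv7-a -mfpu=neon -mfloat-abi=softfp -o test.o test.s
   md_parse_option matches each "-m..." prefix against this table and hands
   the remainder of the argument to the corresponding parse function.  */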
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
#ifdef OBJ_ELF
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {9, ARM_ARCH_V6K},
    {11, ARM_ARCH_V6M},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
/* Set an attribute if it has not already been set by the user.  */
static void
aeabi_set_attribute_int (int tag, int value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}

static void
aeabi_set_attribute_string (int tag, const char *value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
}
/* Set the public EABI object attributes.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      if (strncmp (p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, p);
    }
  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
			     ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}

/* Add the default contents for the .ARM.attributes section.  */
void
arm_md_end (void)
{
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
#endif /* OBJ_ELF */
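
/* Illustrative result of the attribute code above: assembling for a v7-A
   Neon target typically produces .ARM.attributes entries such as
   Tag_CPU_arch = 10, Tag_CPU_arch_profile = 'A' and
   Tag_Advanced_SIMD_arch = 1.  */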
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;
	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }
  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
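
/* Illustrative directive usage:
       .cpu arm920t
   selects the ARM920T entry from arm_cpus, much as -mcpu=arm920t would on
   the command line, and recomputes cpu_variant from the new cpu/fpu pair.  */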
/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
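
/* Illustrative directive usage:
       .fpu vfpv3-d16
   switches the permitted FPU instruction set mid-file by pointing mfpu_opt
   at the matching arm_fpus entry and recomputing cpu_variant.  */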
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
#ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_MPextension_use)
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (strcmp (name, attribute_table[i].name) == 0)
      return attribute_table[i].tag;

  return -1;
}
#endif /* OBJ_ELF */
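
/* Illustrative use of the symbolic names above:
       .eabi_attribute Tag_ABI_FP_number_model, 3
   is accepted because arm_convert_symbolic_attribute maps the tag name to
   its numeric value before the attribute is recorded.  */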