/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */
#include "safe-ctype.h"
#include "opcode/arm.h"
#include "dw2gencfi.h"
#include "dwarf2dbg.h"
/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */
  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */

  /* Opcodes generated from this function.  */
  unsigned char * opcodes;

  /* The number of bytes pushed to the stack.  */

  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */

  /* Nonzero if an unwind_setfp directive has been seen.  */

  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;
/* Results from operand parsing worker functions.  */

  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
/* Types of processor to assemble for.  */
#if defined __XSCALE__
#define CPU_DEFAULT	ARM_ARCH_XSCALE
#if defined __thumb__
#define CPU_DEFAULT	ARM_ARCH_V5T

#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
/* Legacy a.out format.  */
#  define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
/* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)	      (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_neon_fp16 = ARM_FEATURE (0, FPU_NEON_FP16);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

#ifdef EABI_DEFAULT
static int meabi_flags = EABI_DEFAULT;
#else
static int meabi_flags = EF_ARM_EABI_UNKNOWN;
#endif

static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
  {
    IMPLICIT_IT_MODE_NEVER  = 0x00,
    IMPLICIT_IT_MODE_ARM    = 0x01,
    IMPLICIT_IT_MODE_THUMB  = 0x02,
    IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
  };
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
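/* Illustrative example (editorial addition, not part of the original
   comment).  The same conditional add-with-flags is written:

       addseq r0, r1, 1     @ unified syntax: 's' suffix, then condition;
                            @ the '#' before the immediate is optional
       addeqs r0, r1, #1    @ old divided ARM syntax: condition before 's'

   Both mean "if Z is set, r0 = r1 + 1 and update the flags".  */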
  enum neon_el_type type;

#define NEON_MAX_TYPE_ELS 4

  struct neon_type_el el[NEON_MAX_TYPE_ELS];
enum it_instruction_type

    IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			       if inside, should be the last one.  */
    NEUTRAL_IT_INSN,	    /* This could be either inside or outside,
			       i.e. BKPT and NOP.  */
    IT_INSN		    /* The IT insn has been parsed.  */

  unsigned long instruction;
  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is ...  */

  struct neon_type vectype;
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */

    bfd_reloc_code_real_type type;

  enum it_instruction_type it_insn_type;

    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar	: 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign	: 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat	: 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm	: 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec	: 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad	: 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle	: 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

static struct arm_it inst;
#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
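/* Editorial note (not in the original source): the fp_const strings above are
   the eight floating-point constants that the legacy FPA instruction set can
   encode directly as immediate operands; fp_values caches their LITTLENUM_TYPE
   expansions so each only has to be converted once.  */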
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

  const char *	 template_name;

#define COND_ALWAYS 0xE

  const char *	 template_name;

struct asm_barrier_opt
  const char *	 template_name;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
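/* Illustrative example (editorial addition): in an MSR instruction such as

       msr CPSR_fc, r0

   the "_fc" field specifier selects the flags and control fields, i.e. the
   assembler ORs PSR_f | PSR_c into the instruction's field mask.  */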
448 bfd_reloc_code_real_type reloc
;
453 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
454 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
459 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
462 /* Bits for DEFINED field in neon_typed_alias. */
463 #define NTA_HASTYPE 1
464 #define NTA_HASINDEX 2
466 struct neon_typed_alias
468 unsigned char defined
;
470 struct neon_type_el eltype
;
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
  unsigned char number;
  unsigned char builtin;
  struct neon_typed_alias * neon;

/* Diagnostics used when we don't get a register of the expected type.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */

  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned char operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
/* Codes to distinguish the arithmetic instructions.  */

#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14
#define T_OPCODE_MUL	0x4340
#define T_OPCODE_TST	0x4200
#define T_OPCODE_CMN	0x42c0
#define T_OPCODE_NEG	0x4240
#define T_OPCODE_MVN	0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3	0x1a00
#define T_OPCODE_ADD_HI	0x4400
#define T_OPCODE_ADD_ST	0xb000
#define T_OPCODE_SUB_ST	0xb080
#define T_OPCODE_ADD_SP	0xa800
#define T_OPCODE_ADD_PC	0xa000
#define T_OPCODE_ADD_I8	0x3000
#define T_OPCODE_SUB_I8	0x3800
#define T_OPCODE_ADD_I3	0x1c00
#define T_OPCODE_SUB_I3	0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8	0x2800
#define T_OPCODE_CMP_LR	0x4280
#define T_OPCODE_MOV_HR	0x4600
#define T_OPCODE_CMP_HR	0x4500

#define T_OPCODE_LDR_PC	0x4800
#define T_OPCODE_LDR_SP	0x9800
#define T_OPCODE_STR_SP	0x9000
#define T_OPCODE_LDR_IW	0x6800
#define T_OPCODE_STR_IW	0x6000
#define T_OPCODE_LDR_IH	0x8800
#define T_OPCODE_STR_IH	0x8000
#define T_OPCODE_LDR_IB	0x7800
#define T_OPCODE_STR_IB	0x7000
#define T_OPCODE_LDR_RW	0x5800
#define T_OPCODE_STR_RW	0x5000
#define T_OPCODE_LDR_RH	0x5a00
#define T_OPCODE_STR_RH	0x5200
#define T_OPCODE_LDR_RB	0x5c00
#define T_OPCODE_STR_RB	0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH	0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR	0x0100
#define THUMB_LOAD_BIT	0x0800
#define THUMB2_LOAD_BIT	0x00100000
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity ...  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool

  expressionS	 literals[MAX_LITERAL_POOL_SIZE];
  unsigned int	 next_free_entry;

  struct literal_pool * next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

# define now_it seg_info (now_seg)->tc_segment_info_data.current_it

static struct current_it now_it;
now_it_compatible (int cond)
{
  return (cond & ~1) == (now_it.cc & ~1);
}

conditional_insn (void)
{
  return inst.cond != COND_ALWAYS;
}

static int in_it_block (void);
static int handle_it_state (void);
static void force_automatic_it_block_close (void);
static void it_fsm_post_encode (void);

#define set_it_insn_type(type)			\
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return;					\
    }						\
  while (0)

#define set_it_insn_type_nonvoid(type, failret) \
  do						\
    {						\
      inst.it_insn_type = type;			\
      if (handle_it_state () == FAIL)		\
	return failret;				\
    }						\
  while (0)

#define set_it_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
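/* Editorial illustration (not from the original source): both "#4" and "$4"
   are therefore accepted as ways of introducing an immediate operand.  */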
/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

walk_no_bignums (symbolS * sp)

  if (symbol_get_value_expression (sp)->X_op == O_big)
    return 1;

  if (symbol_get_value_expression (sp)->X_add_symbol)
    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	    || (symbol_get_value_expression (sp)->X_op_symbol
		&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

static int in_my_get_expression = 0;
/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3

my_get_expression (expressionS * ep, char ** str, int prefix_mode)
  /* In unified syntax, all prefixes are optional.  */
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode

    case GE_NO_PREFIX: break;

      if (!is_immediate_prefix (**str))
	inst.error = _("immediate expression requires a # prefix");

    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  && (walk_no_bignums (ep->X_add_symbol)
	      && walk_no_bignums (ep->X_op_symbol))))))
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  *str = input_line_pointer;
  input_line_pointer = save_in;
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

md_atof (int type, char * litP, int * sizeP)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

      return _("Unrecognized or unsupported floating point constant");

  t = atof_ieee (input_line_pointer, type, words);

  input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
    {
      for (i = 0; i < prec; i++)
	{
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);
	}
    }

      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	  {
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);
	  }

	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	  {
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
	  }
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */

md_operand (expressionS * expr)
{
  if (in_my_get_expression)
    expr->X_op = O_illegal;
}
/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

immediate_for_directive (int *val)

  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
      input_line_pointer++;

  if (exp.X_op != O_constant)
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();

  *val = exp.X_add_number;
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)

  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)

#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)

  if (!ISALPHA (*p) || !is_name_beginner (*p))

  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)

  /* Alternative syntaxes are accepted for a few register classes.  */

      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)

      /* For backward compatibility, a bare number is valid here.  */
      unsigned long processor = strtoul (start, ccp, 10);
      if (*ccp != start && processor <= 15)

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

arm_reg_parse (char **ccp, enum arm_reg_type type)

  struct reg_entry *reg = arm_reg_parse_multi (ccp);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))

  if (reg && reg->type == type)

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
/* Parse a Neon type specifier.  *STR should point at the leading '.'
   character.  Does no verification at this stage that the type fits the opcode
   properly.

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier.  Returns SUCCESS if this was a legal
   type, FAIL if not.  */
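/* Illustrative examples (editorial addition; the original comment carried its
   own list, elided here): specifiers such as ".i32", ".f32", ".s8.u8" or
   ".p16" are accepted by this parser; each dot-separated element becomes one
   entry in the el[] array of struct neon_type, up to NEON_MAX_TYPE_ELS.  */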
parse_neon_type (struct neon_type *type, char **str)

  while (type->elems < NEON_MAX_TYPE_ELS)

      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

	  /* Just a size without an explicit type.  */

	  switch (TOLOWER (*ptr))
	    case 'i': thistype = NT_integer; break;
	    case 'f': thistype = NT_float; break;
	    case 'p': thistype = NT_poly; break;
	    case 's': thistype = NT_signed; break;
	    case 'u': thistype = NT_unsigned; break;
	      thistype = NT_float;
	      as_bad (_("unexpected character `%c' in type specifier"), *ptr);

	  /* .f is an abbreviation for .f32.  */
	  if (thistype == NT_float && !ISDIGIT (*ptr))

	  thissize = strtoul (ptr, &ptr, 10);

	  if (thissize != 8 && thissize != 16 && thissize != 32
	      as_bad (_("bad size %d in type specifier"), thissize);

      type->el[type->elems].type = thistype;
      type->el[type->elems].size = thissize;

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful.  Avoid overwriting it with later (cascading)
   errors by calling this function.  */

first_error (const char *err)
/* Parse a single type, e.g. ".s32", leading period included.  */

parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)

  struct neon_type optype;

  if (parse_neon_type (&optype, &str) == SUCCESS)
      if (optype.elems == 1)
	*vectype = optype.el[0];

	  first_error (_("only one type should be specified for operand"));

      first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14

/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)

  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */

      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))

  if (type != reg->type)

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
      if ((atype.defined & NTA_HASTYPE) != 0)
	  first_error (_("can't redefine type for operand"));

      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;

  if (skip_past_char (&str, '[') == SUCCESS)
      if (type != REG_TYPE_VFD)
	  first_error (_("only D registers may be indexed"));

      if ((atype.defined & NTA_HASINDEX) != 0)
	  first_error (_("can't change index for operand"));

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	      first_error (_("constant expression required"));

	  if (skip_past_char (&str, ']') == FAIL)

	  atype.index = exp.X_add_number;
/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
		     enum arm_reg_type *rtype, struct neon_type_el *vectype)

  struct neon_typed_alias atype;

  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
      first_error (_("register operand expected, but got scalar"));

    *vectype = atype.eltype;
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
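/* Worked example (editorial addition): parse_scalar below returns
   reg * 16 + index, so the scalar "d3[1]" yields 0x31; NEON_SCALAR_REG (0x31)
   recovers 3 and NEON_SCALAR_INDEX (0x31) recovers 1.  */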
/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */

parse_scalar (char **ccp, int elsize, struct neon_type_el *type)

  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)

  if (atype.index == NEON_ALL_LANES)
      first_error (_("scalar must have an index"));

  else if (atype.index >= 64 / elsize)
      first_error (_("scalar index out of range"));

    *type = atype.eltype;

  return reg * 16 + atype.index;
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
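/* Worked example (editorial addition): the list "{r0-r3, r5, lr}" parses to
   the bitmask 0x402f -- bits 0-3 for r0-r3, bit 5 for r5 and bit 14 for lr.  */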
parse_reg_list (char ** strp)

  char * str = * strp;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));

		      first_error (_("bad range in register list"));

		  for (i = cur_reg + 1; i < reg; i++)
		      if (range & (1 << i))
			  (_("Warning: duplicated register (r%d) in register list"),

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));

	  first_error (_("missing `}'"));

      if (my_get_expression (&expr, &str, GE_NO_PREFIX))

      if (expr.X_op == O_constant)
	  if (expr.X_add_number
	      != (expr.X_add_number & 0x0000ffff))
	      inst.error = _("invalid register mask");

	  if ((range & expr.X_add_number) != 0)
	      int regno = range & expr.X_add_number;

	      regno = (1 << regno) - 1;
	      (_("Warning: duplicated register (r%d) in register list"),

	  range |= expr.X_add_number;

      if (inst.reloc.type != 0)
	  inst.error = _("expression too complex");

      memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
      inst.reloc.type = BFD_RELOC_ARM_MULTI;
      inst.reloc.pc_rel = 0;

      if (*str == '|' || *str == '+')

  while (another_range);
/* Types of registers in a list.  */

/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases.
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */
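/* Illustrative examples (editorial addition): "{s4-s7}" returns 4 with *PBASE
   set to 4 when parsed as REGLIST_VFP_S; with REGLIST_NEON_D a Q register
   such as "q1" stands for the D-register pair d2-d3, since a value of 2 * n
   is used internally for Q<n>.  */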
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)

  enum arm_reg_type regtype = 0;

  unsigned long mask = 0;

      inst.error = _("expecting {");

      regtype = REG_TYPE_VFS;

      regtype = REG_TYPE_VFD;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;

  if (etype != REGLIST_VFP_S)
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))

	  ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,

	  ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,

	base_reg = max_regs;

      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	  first_error (_(reg_expected_msgs[regtype]));

      if (new_base >= max_regs)
	  first_error (_("register out of range in list"));

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	  first_error (_("invalid register list"));

      if ((mask >> new_base) != 0 && ! warned)
	  as_tsktsk (_("register list not in ascending order"));

      mask |= setmask << new_base;

      if (*str == '-') /* We have the start of a range expression */

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      inst.error = gettext (reg_expected_msgs[regtype]);

	  if (high_range >= max_regs)
	      first_error (_("register out of range in list"));

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	      inst.error = _("register range not in ascending order");

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	      if (mask & (setmask << new_base))
		  inst.error = _("invalid register list");

	      mask |= setmask << new_base;

  while (skip_past_comma (&str) != FAIL);

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)

  /* Final test -- the registers must be consecutive.  */

  for (i = 0; i < count; i++)
      if ((mask & (1u << i)) == 0)
	  inst.error = _("non-contiguous register range");
/* True if two alias types are the same.  */

neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)

  if (a->defined != b->defined)

  if ((a->defined & NTA_HASTYPE) != 0
      && (a->eltype.type != b->eltype.type
	  || a->eltype.size != b->eltype.size))

  if ((a->defined & NTA_HASINDEX) != 0
      && (a->index != b->index))
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
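/* Worked example (editorial addition): for "{d0-d3}" the lane defaults to
   NEON_INTERLEAVE_LANES (14), the stride is 1 and the length is 4, so the
   return value is 14 | (0 << 4) | (3 << 5) = 0x6e; the macros above then
   recover lane 14, stride 1 and length 4 from that value.  */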
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)

  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;

  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)

      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

	  first_error (_(reg_expected_msgs[rtype]));

      if (rtype == REG_TYPE_NQ)

      else if (reg_incr == -1)
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	      first_error (_(incr_error));

      else if (getreg != base_reg + reg_incr * count)
	  first_error (_(incr_error));

      if (! neon_alias_types_same (&atype, &firsttype))
	  first_error (_(type_error));

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
	 ...  */

	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;

	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	      first_error (_(type_error));

	  else if (reg_incr != 1)
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));

	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);

	      first_error (_(reg_expected_msgs[rtype]));

	  if (! neon_alias_types_same (&htype, &firsttype))
	      first_error (_(type_error));

	  count += hireg + dregs - getreg;

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)

      if ((atype.defined & NTA_HASINDEX) != 0)

	  else if (lane != atype.index)
	      first_error (_(type_error));

      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	  first_error (_(type_error));

  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
    lane = NEON_INTERLEAVE_LANES;

  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
      first_error (_("error parsing element/structure list"));

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
      first_error (_("expected }"));

    *eltype = firsttype.eltype;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
/* Parse an explicit relocation suffix on an expression.  This is
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   arm_reloc_hsh contains no entries, so this function can only
   succeed if there is no () after the word.  Returns -1 on error,
   BFD_RELOC_UNUSED if there wasn't any suffix.  */
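/* Illustrative example (editorial addition): under OBJ_ELF a data operand may
   carry a parenthesised relocation suffix, e.g. ".word foo(GOT)"; the word
   inside the parentheses is looked up in arm_reloc_hsh by the function
   below.  */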
parse_reloc (char **str)

  struct reloc_entry *r;

    return BFD_RELOC_UNUSED;

  while (*q && *q != ')' && *q != ',')

  if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
/* Directives: register aliases.  */

static struct reg_entry *
insert_reg_alias (char *str, int number, int type)

  struct reg_entry *new_reg;

  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
      if (new_reg->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new_reg->number != number || new_reg->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

  name = xstrdup (str);
  new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));

  new_reg->name = name;
  new_reg->number = number;
  new_reg->type = type;
  new_reg->builtin = FALSE;
  new_reg->neon = NULL;

  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
insert_neon_reg_alias (char *str, int number, int type,
		       struct neon_typed_alias *atype)

  struct reg_entry *reg = insert_reg_alias (str, number, type);

      first_error (_("attempt to redefine typed alias"));

      reg->neon = xmalloc (sizeof (struct neon_typed_alias));
      *reg->neon = *atype;
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */
create_register_alias (char * newname, char *p)

  struct reg_entry *old;
  char *oldname, *nbuf;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */

  if (strncmp (oldname, " .req ", 6) != 0)

  if (*oldname == '\0')

  old = hash_find (arm_reg_hsh, oldname);

      as_warn (_("unknown register '%s' -- .req ignored"), oldname);

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE

  newname = original_case_string;
  nlen = strlen (newname);

  nbuf = alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)

      for (p = nbuf; *p; p++)

      if (strncmp (nbuf, newname, nlen))
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:

	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)

      for (p = nbuf; *p; p++)

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
/* Create a Neon typed/indexed register alias using directives, e.g.:

   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types.  Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32  */
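/* Illustrative examples of the directives referred to above (editorial
   addition; the original comment's own examples were elided here):

       x .dn d2.f32       @ "x" names d2 with an implicit .f32 type
       y .qn q1.u16       @ "y" names q1 with an implicit .u16 type

   The particular register and type names are only for illustration.  */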
create_neon_reg_alias (char *newname, char *p)

  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
      as_bad (_("bad type for register"));

  if (basereg == NULL)

      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	  as_bad (_("expression must be constant"));

      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2

    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	  as_bad (_("can't redefine the type of a register alias"));

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	  as_bad (_("you must specify a single type only"));

      typeinfo.eltype = ntype.el[0];

  if (skip_past_char (&p, '[') == SUCCESS)

      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	  as_bad (_("can't redefine the index of a scalar alias"));

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	  as_bad (_("scalar index must be constant"));

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	  as_bad (_("expecting ]"));

  namelen = nameend - newname;
  namebuf = alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}

s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}

s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example: ...  */
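/* Illustrative usage (editorial addition; the original comment carried its
   own example, elided here):

       acc  .req  r4      @ "acc" now names r4
       .unreq acc         @ the alias is deleted again

   After the .unreq, "acc" is no longer recognised as a register name.  */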
s_unreq (int a ATTRIBUTE_UNUSED)

  name = input_line_pointer;

  while (*input_line_pointer != 0
	 && *input_line_pointer != ' '
	 && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

    as_bad (_("invalid syntax for .unreq directive"));

      struct reg_entry *reg = hash_find (arm_reg_hsh, name);

	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),

	  hash_delete (arm_reg_hsh, name, FALSE);
	  free ((char *) reg->name);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)

	  reg = hash_find (arm_reg_hsh, nbuf);

	      hash_delete (arm_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);

	  for (p = nbuf; *p; p++)

	  reg = hash_find (arm_reg_hsh, nbuf);

	      hash_delete (arm_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
/* Directives: Instruction set selection.  */

/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
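/* Editorial note: the mapping symbols referred to above are "$a" (start of
   ARM code), "$t" (start of Thumb code) and "$d" (start of data), as defined
   by the ARM ELF specification cited in the comment.  */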
/* Create a new mapping symbol for the transition to STATE.  */

make_mapping_symbol (enum mstate state, valueT value, fragS *frag)

  const char * symname;

      type = BSF_NO_FLAGS;

      type = BSF_NO_FLAGS;

      type = BSF_NO_FLAGS;

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);

      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.  */

      know (frag->tc_frag_data.first_map == NULL);
      frag->tc_frag_data.first_map = symbolP;

  if (frag->tc_frag_data.last_map != NULL)
    know (S_GET_VALUE (frag->tc_frag_data.last_map) < S_GET_VALUE (symbolP));
  frag->tc_frag_data.last_map = symbolP;
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */
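/* Illustrative scenario (editorial addition): when 2 bytes of padding are
   inserted to align Thumb code to a word boundary, insert_data_mapping_symbol
   emits a $d mapping symbol at the start of the padding and a new code
   mapping symbol (of the requested STATE) at the aligned address after it.  */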
insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)

  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)

      symbolS *symp = frag->tc_frag_data.last_map;

	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;

      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */
mapping_state (enum mstate state)

  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */

  else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */

  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	   || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check its size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);

  mapping_state_2 (state, 0);
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

mapping_state_2 (enum mstate state, int max_chars)

  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (!SEG_NORMAL (now_seg))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);

#define mapping_state(x) /* nothing */
#define mapping_state_2(x, y) /* nothing */
/* Find the real, Thumb encoded start of a Thumb function.  */

static symbolS *
find_real_start (symbolS * symbolP)
{
  char *       real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS *    new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}

static void
opcode_select (int width)
{
  switch (width)
    {
    case 16:
      if (! thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	    as_bad (_("selected processor does not support THUMB opcodes"));

	  thumb_mode = 1;
	  /* No need to force the alignment, since we will have been
	     coming from ARM mode, which is word-aligned.  */
	  record_alignment (now_seg, 1);
	}
      break;

    case 32:
      if (thumb_mode)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
	    as_bad (_("selected processor does not support ARM opcodes"));

	  thumb_mode = 0;

	  if (!need_pass_2)
	    frag_align (2, 0, 0);

	  record_alignment (now_seg, 1);
	}
      break;

    default:
      as_bad (_("invalid instruction size selected (%d)"), width);
    }
}
static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}

static void
s_code (int unused ATTRIBUTE_UNUSED)
{
  int temp;

  temp = get_absolute_expression ();
  switch (temp)
    {
    case 16:
    case 32:
      opcode_select (temp);
      break;

    default:
      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
    }
}

static void
s_force_thumb (int ignore ATTRIBUTE_UNUSED)
{
  /* If we are not already in thumb mode go into it, EVEN if
     the target processor does not support thumb instructions.
     This is used by gcc/config/arm/lib1funcs.asm for example
     to compile interworking support functions even if the
     target processor should not support interworking.  */
  if (! thumb_mode)
    {
      thumb_mode = 2;
      record_alignment (now_seg, 1);
    }

  demand_empty_rest_of_line ();
}

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  if (! unified_syntax)
    opcode_select (16);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.  */
  label_is_thumb_function_name = TRUE;
}
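/* Usage sketch (illustrative, not from the original source): the
   directives handled above are typically written as

       .code 32            @ or ".arm"   - following insns are ARM
       .code 16            @ or ".thumb" - following insns are Thumb
       .thumb_func
   foo:                    @ foo is tagged as a Thumb entry point
       bx   lr

   .thumb_func only marks the next label; with the unified syntax it
   does not itself switch the assembler into Thumb mode.  */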
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char *    name;
  char      delim;
  char *    end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  name	   = input_line_pointer;
  delim	   = get_symbol_end ();
  end_name = input_line_pointer;
  *end_name = delim;

  if (*input_line_pointer != ',')
    {
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  *end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.  */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */
static void
s_syntax (int unused ATTRIBUTE_UNUSED)
{
  char *name, delim;

  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
  else
    {
      as_bad (_("unrecognized syntax mode \"%s\""), name);
      return;
    }
  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
}
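/* Example (illustrative): a source file selects the syntax variant with

       .syntax unified     @ same mnemonics for ARM and Thumb
       .syntax divided     @ old per-mode syntax

   which simply toggles the unified_syntax flag tested throughout the
   parsers below.  */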
/* Directives: sectioning and alignment.  */

/* Same as s_align_ptwo but align 0 => align 2.  */

static void
s_align (int unused ATTRIBUTE_UNUSED)
{
  int temp;
  bfd_boolean fill_p;
  long temp_fill;
  long max_alignment = 15;

  temp = get_absolute_expression ();
  if (temp > max_alignment)
    as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
  else if (temp < 0)
    {
      as_bad (_("alignment negative. 0 assumed."));
      temp = 0;
    }

  if (*input_line_pointer == ',')
    {
      input_line_pointer++;
      temp_fill = get_absolute_expression ();
      fill_p = TRUE;
    }
  else
    {
      fill_p = FALSE;
      temp_fill = 0;
    }

  if (!temp)
    temp = 2;

  /* Only make a frag if we HAVE to.  */
  if (temp && !need_pass_2)
    {
      if (!fill_p && subseg_text_p (now_seg))
	frag_align_code (temp, 0);
      else
	frag_align (temp, (int) temp_fill, 0);
    }
  demand_empty_rest_of_line ();

  record_alignment (now_seg, temp);
}
static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.  */
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg)
	break;
    }

  return pool;
}

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool *      pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = xmalloc (sizeof (* pool));

      pool->next_free_entry = 0;
      pool->section	    = now_seg;
      pool->sub_section	    = now_subseg;
      pool->next	    = list_of_pools;
      pool->symbol	    = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  return pool;
}
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  inst.reloc.exp.X_op	      = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
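/* Example (illustrative, not from the original source): a pseudo load

       ldr r0, =0x12345678

   whose constant cannot be encoded directly ends up here; the value is
   appended to the current literal pool and the relocation is rewritten
   as pool_symbol + 4*entry, so the ldr becomes a PC-relative load once
   the pool is dumped by .ltorg or .pool.  */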
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
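/* Usage sketch (illustrative): the pool built above is flushed with

       ldr r0, =0xdeadbeef
       b   1f
       .ltorg              @ literal word(s) emitted here, out of line
   1:

   Placing .ltorg (or its alias .pool) within the ldr's addressing range
   keeps the PC-relative fixup in range in long code sections.  */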
/* Forward declarations for functions below, in the MD interface
   section.  */
static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
static valueT create_unwind_entry (int);
static void start_unwind_section (const segT, int);
static void add_unwind_opcode (valueT, int);
static void flush_pending_unwind (void);

/* Directives: Data.  */
static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (_("%s relocations do not fit in %d bytes"),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = alloca (input_line_pointer - base);
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  offset = nbytes - size;
		  p = frag_more ((int) nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, reloc);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
/* Emit an expression containing a 32-bit thumb instruction.
   Implementation based on put_thumb32_insn.  */

static void
emit_thumb32_expr (expressionS * exp)
{
  expressionS exp_high = *exp;

  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
  exp->X_add_number &= 0xffff;
  emit_expr (exp, (unsigned int) THUMB_SIZE);
}

/*  Guess the instruction size based on the opcode.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      if (now_it.state == AUTOMATIC_IT_BLOCK)
		set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
	      else
		set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. "
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
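/* Example (illustrative): .inst emits values as instructions rather
   than data, so they receive $a/$t mapping symbols and are byte-swapped
   correctly on big-endian targets, e.g.

       .thumb
       .inst.n 0xbf00          @ 16-bit Thumb NOP encoding
       .inst.w 0xf3af8000      @ 32-bit NOP.W encoding
       .arm
       .inst   0xe1a00000      @ ARM "mov r0, r0"  */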
/* Parse a .rel31 directive.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
/* Directives: AEABI stack-unwind tables.  */

/* Parse an unwind_fnstart directive.  Simply records the current location.  */

static void
s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (unwind.proc_start)
    {
      as_bad (_("duplicate .fnstart directive"));
      return;
    }

  /* Mark the start of the function.  */
  unwind.proc_start = expr_build_dot ();

  /* Reset the rest of the unwind info.  */
  unwind.opcode_count = 0;
  unwind.table_entry = NULL;
  unwind.personality_routine = NULL;
  unwind.personality_index = -1;
  unwind.frame_size = 0;
  unwind.fp_offset = 0;
  unwind.fp_reg = REG_SP;
  unwind.fp_used = 0;
  unwind.sp_restored = 0;
}
/* Parse a handlerdata directive.  Creates the exception handling table entry
   for the function.  */

static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  create_unwind_entry (1);
}
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.  */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.  */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
/* Parse an unwind_cantunwind directive.  */

static void
s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("personality routine specified for cantunwind frame"));

  unwind.personality_index = -2;
}

/* Parse a personalityindex directive.  */

static void
s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personalityindex directive"));

  expression (&exp);

  if (exp.X_op != O_constant
      || exp.X_add_number < 0 || exp.X_add_number > 15)
    {
      as_bad (_("bad personality routine number"));
      ignore_rest_of_line ();
      return;
    }

  unwind.personality_index = exp.X_add_number;

  demand_empty_rest_of_line ();
}
/* Parse a personality directive.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  name = input_line_pointer;
  c = get_symbol_end ();
  p = input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  *p = c;
  demand_empty_rest_of_line ();
}
/* Parse a directive saving core registers.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  range = parse_reg_list (&input_line_pointer);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.  */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.  */
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
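/* Example (illustrative, not from the original source): for a prologue

       push {r4-r7, lr}

   the matching unwind annotation is

       .save {r4-r7, lr}

   which the code above encodes with the short pop opcode (0xa0..0xaf,
   a contiguous run starting at r4, optionally plus r14) and falls back
   to the long 0x8000|mask form for any other register set.  */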
/* Parse a directive saving FPA registers.  */

static void
s_arm_unwind_save_fpa (int reg)
{
  expressionS exp;
  int num_regs;
  valueT op;

  /* Get Number of registers to transfer.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    expression (&exp);
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected , <constant>"));
      ignore_rest_of_line ();
      return;
    }

  num_regs = exp.X_add_number;

  if (num_regs < 1 || num_regs > 4)
    {
      as_bad (_("number of registers must be in the range [1:4]"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 4)
    {
      /* Short form.  */
      op = 0xb4 | (num_regs - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xc800 | (reg << 4) | (num_regs - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += num_regs * 12;
}
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  unwind.frame_size += count * 8;
}
/* Parse a directive saving VFP registers for pre-ARMv6.  */

static void
s_arm_unwind_save_vfp (void)
{
  int count;
  unsigned int reg;
  valueT op;

  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  if (reg == 8)
    {
      /* Short form.  */
      op = 0xb8 | (count - 1);
      add_unwind_opcode (op, 1);
    }
  else
    {
      /* Long form.  */
      op = 0xb300 | (reg << 4) | (count - 1);
      add_unwind_opcode (op, 2);
    }
  unwind.frame_size += count * 8 + 4;
}
/* Parse a directive saving iWMMXt data registers.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.  We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.  */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.  */
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.  */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
error:
  ignore_rest_of_line ();
}
static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  if (*input_line_pointer == '}')
    input_line_pointer++;

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.  */
  flush_pending_unwind ();

  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
error:
  ignore_rest_of_line ();
}
/* Parse an unwind_save directive.
   If the argument is non-zero, this is a .vsave directive.  */

static void
s_arm_unwind_save (int arch_v6)
{
  char *peek;
  struct reg_entry *reg;
  bfd_boolean had_brace = FALSE;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  /* Figure out what sort of save we have.  */
  peek = input_line_pointer;

  if (*peek == '{')
    {
      had_brace = TRUE;
      peek++;
    }

  reg = arm_reg_parse_multi (&peek);

  if (!reg)
    {
      as_bad (_("register expected"));
      ignore_rest_of_line ();
      return;
    }

  switch (reg->type)
    {
    case REG_TYPE_FN:
      if (had_brace)
	{
	  as_bad (_("FPA .unwind_save does not take a register list"));
	  ignore_rest_of_line ();
	  return;
	}
      input_line_pointer = peek;
      s_arm_unwind_save_fpa (reg->number);
      return;

    case REG_TYPE_RN:	  s_arm_unwind_save_core ();   return;
    case REG_TYPE_VFD:
      if (arch_v6)
	s_arm_unwind_save_vfp_armv6 ();
      else
	s_arm_unwind_save_vfp ();
      return;
    case REG_TYPE_MMXWR:  s_arm_unwind_save_mmxwr ();  return;
    case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;

    default:
      as_bad (_(".unwind_save does not support this kind of register"));
      ignore_rest_of_line ();
    }
}
/* Parse an unwind_movsp directive.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
/* Parse an unwind_pad directive.  */

static void
s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
{
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (immediate_for_directive (&offset) == FAIL)
    return;

  if (offset & 3)
    {
      as_bad (_("stack increment must be multiple of 4"));
      ignore_rest_of_line ();
      return;
    }

  /* Don't generate any opcodes, just record the details for later.  */
  unwind.frame_size += offset;
  unwind.pending_offset += offset;

  demand_empty_rest_of_line ();
}
/* Parse an unwind_setfp directive.  */

static void
s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
{
  int sp_reg;
  int fp_reg;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (skip_past_comma (&input_line_pointer) == FAIL)
    sp_reg = FAIL;
  else
    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);

  if (fp_reg == FAIL || sp_reg == FAIL)
    {
      as_bad (_("expected <reg>, <reg>"));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.  */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
    {
      as_bad (_("register must be either sp or set by a previous "
		"unwind_movsp directive"));
      return;
    }

  /* Don't generate any opcodes, just record the information for later.  */
  unwind.fp_reg = fp_reg;
  unwind.fp_used = 1;
  if (sp_reg == REG_SP)
    unwind.fp_offset = unwind.frame_size - offset;
  else
    unwind.fp_offset -= offset;
}
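/* Example (illustrative): a frame-pointer prologue such as

       push  {r4, fp, lr}
       add   fp, sp, #4

   is described to the unwinder with

       .setfp fp, sp, #4

   after which offsets are tracked relative to fp, as recorded in
   unwind.fp_reg and unwind.fp_offset above.  */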
/* Parse an unwind_raw directive.  */

static void
s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  /* This is an arbitrary limit.  */
  unsigned char op[16];
  int count;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  expression (&exp);
  if (exp.X_op == O_constant
      && skip_past_comma (&input_line_pointer) != FAIL)
    {
      unwind.frame_size += exp.X_add_number;
      expression (&exp);
    }
  else
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected <offset>, <opcode>"));
      ignore_rest_of_line ();
      return;
    }

  count = 0;

  /* Parse the opcode.  */
  for (;;)
    {
      if (count >= 16)
	{
	  as_bad (_("unwind opcode too long"));
	  ignore_rest_of_line ();
	  return;
	}
      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
	{
	  as_bad (_("invalid unwind opcode"));
	  ignore_rest_of_line ();
	  return;
	}
      op[count++] = exp.X_add_number;

      /* Parse the next byte.  */
      if (skip_past_comma (&input_line_pointer) == FAIL)
	break;

      expression (&exp);
    }

  /* Add the opcode bytes in reverse order.  */
  while (count--)
    add_unwind_opcode (op[count], 1);

  demand_empty_rest_of_line ();
}
/* Parse a .eabi_attribute directive.  */

static void
s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
{
  int tag = s_vendor_attribute (OBJ_ATTR_PROC);

  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
    attributes_set_explicitly[tag] = 1;
}
#endif /* OBJ_ELF */
static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);

#ifdef TE_PE

static void
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
{
  expressionS exp;

  do
    {
      expression (&exp);
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
    }
  while (*input_line_pointer++ == ',');

  input_line_pointer--;
  demand_empty_rest_of_line ();
}
#endif /* TE_PE */
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,	  0 },
  { "qn",	   s_qn,	  0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch, 0 },
  { "fpu",	   s_arm_fpu,	  0 },
#ifdef OBJ_ELF
  { "word",	       s_arm_elf_cons, 4 },
  { "long",	       s_arm_elf_cons, 4 },
  { "inst.n",	       s_arm_elf_inst, 2 },
  { "inst.w",	       s_arm_elf_inst, 4 },
  { "inst",	       s_arm_elf_inst, 0 },
  { "rel31",	       s_arm_rel31,    0 },
  { "fnstart",	       s_arm_unwind_fnstart,	0 },
  { "fnend",	       s_arm_unwind_fnend,	0 },
  { "cantunwind",      s_arm_unwind_cantunwind,	0 },
  { "personality",     s_arm_unwind_personality, 0 },
  { "personalityindex", s_arm_unwind_personalityindex, 0 },
  { "handlerdata",     s_arm_unwind_handlerdata, 0 },
  { "save",	       s_arm_unwind_save,	0 },
  { "vsave",	       s_arm_unwind_save,	1 },
  { "movsp",	       s_arm_unwind_movsp,	0 },
  { "pad",	       s_arm_unwind_pad,	0 },
  { "setfp",	       s_arm_unwind_setfp,	0 },
  { "unwind_raw",      s_arm_unwind_raw,	0 },
  { "eabi_attribute",  s_arm_eabi_attribute,	0 },
#else
  { "word",	   cons, 4 },

  /* These are used for dwarf.  */
  { "2byte", cons, 2 },
  { "4byte", cons, 4 },
  { "8byte", cons, 8 },
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
#ifdef TE_PE
  { "secrel32",	   pe_directive_secrel, 0 },
#endif
  { 0, 0, 0 }
};
/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

static int
parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
{
  expressionS exp;
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i].  */

static int
parse_big_immediate (char **str, int i)
{
  expressionS exp;
  char *ptr = *str;

  my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);

  if (exp.X_op == O_constant)
    {
      inst.operands[i].imm = exp.X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp.X_add_number & ~0xffffffffl) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4.  */
	  inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp.X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
	   && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
    {
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
    }
  else
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.  */
      if (gen_to_words (words, 5, (long) 15) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
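/* Worked example (illustrative): 1.0f is 0x3f800000; its low 19 bits are
   zero, bit 29 is set so bs is 0x3e000000, and bits 30..25 are 0b011111,
   so the XOR test passes and 1.0 can be encoded as an 8-bit immediate.
   By contrast 1.0f/3 (~0x3eaaaaab) is rejected because its trailing
   mantissa bits are nonzero.  */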
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  */

static int
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;
      return SUCCESS;
    }

  return FAIL;
}
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
};

struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.  */
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.  */
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
};

/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *p;
  int reg;

  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.  */
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS expr;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&expr, str, GE_NO_PREFIX))
	return FAIL;

      if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = expr.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
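/* Worked example (illustrative): for "#4, 2" (immediate 4, rotation 2)
   the operand value is 4 rotated right by 2, i.e. 1.  The code above
   therefore stores ((4 << 30) | (4 >> 2)) & 0xffffffff = 1, the decoded
   value, so that md_apply_fix can re-derive a valid 8-bit constant and
   rotation when it encodes the instruction.  */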
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry
{
  const char *name;
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */
  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC
} group_reloc_type;

/* Entry columns: name, ALU, LDR, LDRS, LDC reloc codes.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc", BFD_RELOC_ARM_ALU_PC_G0_NC, 0, 0, 0 },
    { "pc_g0",	  BFD_RELOC_ARM_ALU_PC_G0, BFD_RELOC_ARM_LDR_PC_G0,
		  BFD_RELOC_ARM_LDRS_PC_G0, BFD_RELOC_ARM_LDC_PC_G0 },
    { "pc_g1_nc", BFD_RELOC_ARM_ALU_PC_G1_NC, 0, 0, 0 },
    { "pc_g1",	  BFD_RELOC_ARM_ALU_PC_G1, BFD_RELOC_ARM_LDR_PC_G1,
		  BFD_RELOC_ARM_LDRS_PC_G1, BFD_RELOC_ARM_LDC_PC_G1 },
    { "pc_g2",	  BFD_RELOC_ARM_ALU_PC_G2, BFD_RELOC_ARM_LDR_PC_G2,
		  BFD_RELOC_ARM_LDRS_PC_G2, BFD_RELOC_ARM_LDC_PC_G2 },
    /* Section base relative */
    { "sb_g0_nc", BFD_RELOC_ARM_ALU_SB_G0_NC, 0, 0, 0 },
    { "sb_g0",	  BFD_RELOC_ARM_ALU_SB_G0, BFD_RELOC_ARM_LDR_SB_G0,
		  BFD_RELOC_ARM_LDRS_SB_G0, BFD_RELOC_ARM_LDC_SB_G0 },
    { "sb_g1_nc", BFD_RELOC_ARM_ALU_SB_G1_NC, 0, 0, 0 },
    { "sb_g1",	  BFD_RELOC_ARM_ALU_SB_G1, BFD_RELOC_ARM_LDR_SB_G1,
		  BFD_RELOC_ARM_LDRS_SB_G1, BFD_RELOC_ARM_LDC_SB_G1 },
    { "sb_g2",	  BFD_RELOC_ARM_ALU_SB_G2, BFD_RELOC_ARM_LDR_SB_G2,
		  BFD_RELOC_ARM_LDRS_SB_G2, BFD_RELOC_ARM_LDC_SB_G2 } };
/* Given the address of a pointer pointing to the textual name of a group
   relocation as may appear in assembler source, attempt to find its details
   in group_reloc_table.  The pointer will be updated to the character after
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
   otherwise.  On success, *entry will be updated to point at the relevant
   group_reloc_table entry.  */

static int
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
    {
      int length = strlen (group_reloc_table[i].name);

      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
	  && (*str)[length] == ':')
	{
	  *out = &group_reloc_table[i];
	  *str += (length + 1);
	  return SUCCESS;
	}
    }

  return FAIL;
}
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str) += 1;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]      .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	      .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		      .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset      .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	      .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		      .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}     .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	      shorthand for [Rn,#0]{!}
   =immediate	      .isreg=0 .reloc.exp=immediate
   label	      .reg=PC .reloc.pc_rel=1 .reloc.exp=label

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* bare address - translate to PC-relative offset */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;
	}
      /* else a load-constant pseudo op, no special treatment needed here */

      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  expressionS exp;
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }
	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
	      return PARSE_OPERAND_FAIL;
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		{
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
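/* Examples (illustrative) of syntaxes accepted by parse_address_main:

       ldr r0, [r1, #4]          @ pre-indexed, immediate offset
       ldr r0, [r1, r2, lsl #2]! @ pre-indexed, register offset, writeback
       ldr r0, [r1], #-8         @ post-indexed
       ldr r0, [r1]              @ shorthand for [r1, #0]
       ldr r0, =0x12345678       @ literal-pool load ("=" form)
       ldr r0, label             @ PC-relative load

   The operand bits (.preind, .postind, .writeback, .negative, ...) are
   filled in accordingly for the encoders to inspect.  */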
static int
parse_address (char **str, int i)
{
  return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
	 ? SUCCESS : FAIL;
}

static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
/* Parse an operand for a MOVW or MOVT instruction.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
/* Miscellaneous. */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
static int
parse_psr (char **str)
{
  char *p, *start;
  unsigned long psr_field;
  const struct asm_psr *psr;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      start = ++p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = hash_find_n (arm_psr_hsh, start, p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   value suitable for splatting into the AIF field of the instruction.  */
static int
parse_cps_flags (char **str)
{
  int val = 0;
  int saw_a_flag = 0;
  char *s = *str;

  for (;;)
    switch (*s++)
      {
      case '\0': case ',':
	goto done;

      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;

      default:
	inst.error = _("unrecognized CPS flag");
	return FAIL;
      }

 done:
  if (saw_a_flag == 0)
    {
      inst.error = _("missing CPS flags");
      return FAIL;
    }

  *str = s - 1;
  return val;
}
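/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the CPS flag letters map onto the three-bit AIF field exactly as in the
   switch above, a = 4 (bit 2), i = 2 (bit 1), f = 1 (bit 0), so "aif"
   yields 7 and "if" yields 3.  */
static int ATTRIBUTE_UNUSED
example_cps_flags_to_aif (const char *flags)
{
  int val = 0;

  for (; *flags; flags++)
    switch (*flags)
      {
      case 'a': case 'A': val |= 0x4; break;	/* Imprecise abort mask.  */
      case 'i': case 'I': val |= 0x2; break;	/* IRQ mask.  */
      case 'f': case 'F': val |= 0x1; break;	/* FIQ mask.  */
      default: return -1;			/* Unknown flag letter.  */
      }
  return val;
}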
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
static int
parse_endian_specifier (char **str)
{
  int little_endian;
  char *s = *str;

  if (strncasecmp (s, "BE", 2))
    little_endian = 1;
  else if (strncasecmp (s, "LE", 2))
    little_endian = 0;
  else
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  if (ISALNUM (s[2]) || s[2] == '_')
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  *str = s + 2;
  return little_endian;
}

/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
   value suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  */
static int
parse_ror (char **str)
{
  int rot;
  char *s = *str;

  if (strncasecmp (s, "ROR", 3) == 0)
    s += 3;
  else
    {
      inst.error = _("missing rotation field after comma");
      return FAIL;
    }

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
    return FAIL;

  switch (rot)
    {
    case 0:  *str = s; return 0x0;
    case 8:  *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

    default:
      inst.error = _("rotation can only be 0, 8, 16, or 24");
      return FAIL;
    }
}
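/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the four legal sxt/sxta rotations map onto the two-bit rotate field as
   the rotation amount divided by eight, which matches the 0x0..0x3 values
   returned above (so ROR #16 encodes as 0x2).  */
static int ATTRIBUTE_UNUSED
example_ror_to_rotate_field (int rotation)
{
  if (rotation != 0 && rotation != 8 && rotation != 16 && rotation != 24)
    return -1;			/* Only 0, 8, 16 and 24 are encodable.  */
  return rotation / 8;		/* Two-bit field value.  */
}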
/* Parse a conditional code (from conds[] below).  The value returned is in the
   range 0 .. 14, or FAIL.  */
static int
parse_cond (char **str)
{
  char *q;
  const struct asm_cond *c;
  int n;
  /* Condition codes are always 2 characters, so matching up to
     3 characters is sufficient.  */
  char cond[3];

  q = *str;
  n = 0;
  while (ISALPHA (*q) && n < 3)
    {
      cond[n] = TOLOWER (*q);
      q++;
      n++;
    }

  c = hash_find_n (arm_cond_hsh, cond, n);
  if (!c)
    {
      inst.error = _("condition required");
      return FAIL;
    }

  *str = q;
  return c->value;
}

/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or FAIL.  */
static int
parse_barrier (char **str)
{
  char *p, *q;
  const struct asm_barrier_opt *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
  if (!o)
    return FAIL;

  *str = q;
  return o->value;
}

/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  */
static int
parse_tb (char **str)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      /* Case 14.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_NILO,	/* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...)  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  OP_FIRST_OPTIONAL = OP_oI7b
};
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */
static int
parse_operands (char *str, const unsigned char *pattern)
{
  unsigned const char *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;

#define po_char_or_fail(chr) \
  do \
    { \
      if (skip_past_char (&str, chr) == FAIL) \
	goto failure; \
    } \
  while (0)

#define po_reg_or_fail(regtype) \
  do \
    { \
      val = arm_typed_reg_parse (& str, regtype, & rtype, \
				 & inst.operands[i].vectype); \
      if (val == FAIL) \
	{ \
	  first_error (_(reg_expected_msgs[regtype])); \
	  goto failure; \
	} \
      inst.operands[i].reg = val; \
      inst.operands[i].isreg = 1; \
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
				|| rtype == REG_TYPE_VFD \
				|| rtype == REG_TYPE_NQ); \
    } \
  while (0)

#define po_reg_or_goto(regtype, label) \
  do \
    { \
      val = arm_typed_reg_parse (& str, regtype, & rtype, \
				 & inst.operands[i].vectype); \
      if (val == FAIL) \
	goto label; \
      inst.operands[i].reg = val; \
      inst.operands[i].isreg = 1; \
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
				|| rtype == REG_TYPE_VFD \
				|| rtype == REG_TYPE_NQ); \
    } \
  while (0)

#define po_imm_or_fail(min, max, popt) \
  do \
    { \
      if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
	goto failure; \
      inst.operands[i].imm = val; \
    } \
  while (0)

#define po_scalar_or_goto(elsz, label) \
  do \
    { \
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
      if (val == FAIL) \
	goto label; \
      inst.operands[i].reg = val; \
      inst.operands[i].isscalar = 1; \
    } \
  while (0)

#define po_misc_or_fail(expr) \
  do \
    { \
      if (expr) \
	goto failure; \
    } \
  while (0)

#define po_misc_or_fail_no_backtrack(expr) \
  do \
    { \
      result = expr; \
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
	backtrack_pos = 0; \
      if (result != PARSE_OPERAND_SUCCESS) \
	goto failure; \
    } \
  while (0)
  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      if (upat[i] >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (upat[i])
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	/* Neon scalar. Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;

	/* WARNING: We can expand to two operands here. This has the potential
	   to totally confuse the backtracking mechanism! It will be OK at
	   least as long as we don't try to use optional args as well,
	   though.  */
	case OP_NILO:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm);
	    inst.operands[i].present = 1;
	    i++;
	    skip_past_comma (&str);
	    po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
	    break;
	    one_reg_only:
	    /* Optional register operand was omitted. Unfortunately, it's in
	       operands[i-1] and we need it to be in inst.operands[i]. Fix that
	       here (this is a bit grotty).  */
	    inst.operands[i] = inst.operands[i-1];
	    inst.operands[i-1].present = 0;
	    break;
	    try_imm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I0:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	    break;
	    try_imm0:
	    po_imm_or_fail (0, 0, TRUE);
	  }
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  {
	    po_scalar_or_goto (8, try_rr);
	    break;
	    try_rr:
	    po_reg_or_fail (REG_TYPE_RN);
	  }
	  break;

	case OP_RNSDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_nsdq);
	    break;
	    try_nsdq:
	    po_reg_or_fail (REG_TYPE_NSDQ);
	  }
	  break;

	case OP_RNDQ_RNSC:
	  {
	    po_scalar_or_goto (8, try_ndq);
	    break;
	    try_ndq:
	    po_reg_or_fail (REG_TYPE_NDQ);
	  }
	  break;

	case OP_RND_RNSC:
	  {
	    po_scalar_or_goto (8, try_vfd);
	    break;
	    try_vfd:
	    po_reg_or_fail (REG_TYPE_VFD);
	  }
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_IMVNb:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
	    break;
	    try_mvnimm:
	    /* There's a possibility of getting a 64-bit immediate here, so
	       we need special handling.  */
	    if (parse_big_immediate (&str, i) == FAIL)
	      {
		inst.error = _("immediate value is out of range");
		goto failure;
	      }
	  }
	  break;

	case OP_RNDQ_I63b:
	  {
	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	    break;
	    try_shimm:
	    po_imm_or_fail (0, 63, TRUE);
	  }
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:	  po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		   po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	  try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      unsigned found = 0;

	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), upat[i]);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (upat[i])
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err) \
  do \
    { \
      if (expr) \
	{ \
	  inst.error = err; \
	  return; \
	} \
    } \
  while (0)

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
#define reject_bad_reg(reg) \
  do \
    if (reg == REG_SP || reg == REG_PC) \
      { \
	inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
	return; \
      } \
  while (0)

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  */
#define warn_deprecated_sp(reg) \
  do \
    if (warn_on_deprecated && reg == REG_SP) \
      as_warn (_("use of r13 is deprecated")); \
  while (0)

/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}
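/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the rule implemented by encode_arm_immediate above is that an ARM
   data-processing immediate is an 8-bit value rotated right by an even
   amount, packed as a 4-bit rotate count (the rotation divided by two) in
   bits 11:8 plus the 8-bit value in bits 7:0.  The standalone checker below,
   assuming 32-bit unsigned int, accepts 0x000000ff, 0xff000000 and 0x000003fc
   but rejects 0x00000101.  */
static int ATTRIBUTE_UNUSED
example_arm_immediate_encodable_p (unsigned int val)
{
  unsigned int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      /* Rotate VAL left by ROT bits; if the result fits in eight bits the
	 constant is encodable with a rotate-right count of ROT/2.  */
      unsigned int r = rot ? ((val << rot) | (val >> (32 - rot))) : val;

      if (r <= 0xff)
	return 1;
    }
  return 0;
}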
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */
static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  if (val <= 0xff)
    return val;

  for (i = 1; i <= 24; i++)
    {
      a = val >> i;
      if ((val & ~(0xff << i)) == 0)
	return ((val >> i) & 0x7f) | ((32 - i) << 7);
    }

  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
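/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the Thumb-2 "modified immediate" forms tested above are, for a byte XY,
   0x000000XY, 0x00XY00XY, 0xXY00XY00, 0xXYXYXYXY, or an 8-bit value shifted
   left by 1..24 bits.  This simplified standalone predicate only classifies
   a value; it does not produce the packed encoding.  */
static int ATTRIBUTE_UNUSED
example_thumb32_immediate_encodable_p (unsigned int val)
{
  unsigned int b = val & 0xff;
  unsigned int i;

  if (val <= 0xff)					/* 0x000000XY  */
    return 1;
  if (val == ((b << 16) | b))				/* 0x00XY00XY  */
    return 1;
  if (val == ((b << 24) | (b << 16) | (b << 8) | b))	/* 0xXYXYXYXY  */
    return 1;
  b = val & 0xff00;
  if (val == ((b << 16) | b))				/* 0xXY00XY00  */
    return 1;
  for (i = 1; i <= 24; i++)				/* shifted 8-bit value  */
    if ((val & ~(0xffu << i)) == 0)
      return 1;
  return 0;
}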
/* Encode a VFP SP or DP register number into inst.instruction.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
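/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   a single-precision register Sn is split as above into a 4-bit field
   (n >> 1) and a 1-bit field (n & 1), while a double-precision register Dn
   is split into (n & 15) and (n >> 4).  For example S13 splits into 6 and 1,
   and D17 splits into 1 and 1.  */
static void ATTRIBUTE_UNUSED
example_split_vfp_reg (int reg, int is_double,
		       unsigned int *main_field, unsigned int *extra_bit)
{
  if (is_double)
    {
      *main_field = reg & 15;	/* Low four bits of the D register number.  */
      *extra_bit = reg >> 4;	/* Fifth bit, only nonzero with 32 D regs.  */
    }
  else
    {
      *main_field = reg >> 1;	/* Upper four bits of the S register number.  */
      *extra_bit = reg & 1;	/* Lowest bit.  */
    }
}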
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}

static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
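/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the overlap warning above compares the base register field Rn (bits 19:16)
   with the transfer register field Rd (bits 15:12) of the partially built
   opcode.  A standalone extraction of those two fields looks like this.  */
static int ATTRIBUTE_UNUSED
example_writeback_base_overlaps_rd (unsigned long insn)
{
  unsigned int rn = (insn & 0x000f0000) >> 16;	/* Base register field.  */
  unsigned int rd = (insn & 0x0000f000) >> 12;	/* Source/destination field.  */

  return rn == rd;
}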
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}

/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static void
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }
}
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
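/* Illustrative sketch (hypothetical helpers, not part of the assembler): the
   decision above tries "mov rd, #const", then "mvn rd, #~const", and only
   falls back to a PC-relative literal-pool load when neither constant fits
   the rotated 8-bit immediate form.  A standalone classifier, assuming the
   same rotation rule as encode_arm_immediate, could look like this.  */
enum example_ldr_eq_strategy { EXAMPLE_USE_MOV, EXAMPLE_USE_MVN, EXAMPLE_USE_POOL };

static int ATTRIBUTE_UNUSED
example_fits_rotated_imm8 (unsigned int value)
{
  unsigned int rot;

  for (rot = 0; rot < 32; rot += 2)
    {
      unsigned int v = rot ? ((value << rot) | (value >> (32 - rot))) : value;

      if (v <= 0xff)
	return 1;
    }
  return 0;
}

static enum example_ldr_eq_strategy ATTRIBUTE_UNUSED
example_classify_ldr_eq_constant (unsigned int value)
{
  if (example_fits_rotated_imm8 (value))	/* mov rd, #value  */
    return EXAMPLE_USE_MOV;
  if (example_fits_rotated_imm8 (~value))	/* mvn rd, #~value  */
    return EXAMPLE_USE_MVN;
  return EXAMPLE_USE_POOL;			/* ldr rd, [pc, #offset]  */
}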
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}

static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		_("Rn must not overlap other operands"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}

static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}

static void
do_rm_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}

static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}

/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   wrap).  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;
}

/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel	       = 1;
  inst.size		       = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
}

static void
do_arit (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}

static void
do_barrier (void)
{
  if (inst.operands[0].present)
    {
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm != 0xf,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
}
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}

static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}

static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
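/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the three encoders above convert the assembler-level <lsb>, <width> pair
   into the <lsb>, <msb> pair that the hardware encoding stores, with
   msb = lsb + width - 1.  For BFI r0, r1, #4, #8 the stored fields are
   lsb = 4 and msb = 11.  */
static void ATTRIBUTE_UNUSED
example_bitfield_lsb_width_to_fields (unsigned int lsb, unsigned int width,
				      unsigned int *enc_lsb,
				      unsigned int *enc_msb)
{
  *enc_lsb = lsb;		/* Goes into bits 11:7 of the opcode.  */
  *enc_msb = lsb + width - 1;	/* Goes into bits 20:16 of the opcode.  */
}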
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
     Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
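/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the 16-bit BKPT number is split exactly as above, so 0xABCD becomes 0xABC
   in bits 19:8 and 0xD in bits 3:0.  */
static unsigned long ATTRIBUTE_UNUSED
example_pack_bkpt_immediate (unsigned int imm16)
{
  unsigned long fields = 0;

  fields |= (unsigned long) (imm16 & 0xfff0) << 4;  /* Top 12 bits -> 19:8.  */
  fields |= imm16 & 0xf;			    /* Low 4 bits -> 3:0.  */
  return fields;
}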
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    inst.reloc.type = default_reloc;

  inst.reloc.pc_rel = 1;
}

static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}

static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}

/* ARM v5TEJ.  Jump to Jazelle code.  */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	*/
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}

static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}

/* Transfer between coprocessor and ARM registers.
   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
   MRC2
   MCR{cond}
   MCR2

   No special properties.  */

static void
do_co_reg (void)
{
  unsigned Rd;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR  */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}

/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}

static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}

static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  inst.size = 0;
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
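/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   the register list is a 16-bit mask, so the checks above test membership
   with (range & (1 << base_reg)) and "some lower-numbered register is also
   listed" with (range & ((1 << base_reg) - 1)).  For STMIA r1!, {r0, r1} the
   base r1 is in the list and r0 precedes it, which is the diagnosed case.  */
static int ATTRIBUTE_UNUSED
example_stm_writeback_base_not_lowest (unsigned int range, int base_reg)
{
  unsigned int base_bit = 1u << base_reg;

  return (range & base_bit) != 0	    /* Base appears in the list...  */
	 && (range & (base_bit - 1)) != 0;  /* ...and is not the lowest one.  */
}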
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}

static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}

static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}

static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}

/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}

static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}

/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
static void
do_mlas (void)
{
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}

static void
do_mov (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
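/* Illustrative sketch (a hypothetical helper, not part of the assembler):
   a 16-bit MOVW/MOVT immediate is stored in two pieces of the opcode, the
   low twelve bits in bits 11:0 and the high four bits in bits 19:16, exactly
   as done above.  0x1234 therefore contributes 0x234 to bits 11:0 and 0x1 to
   bits 19:16.  */
static unsigned long ATTRIBUTE_UNUSED
example_pack_mov16_immediate (unsigned int imm16)
{
  unsigned long fields = 0;

  fields |= imm16 & 0x00000fff;				/* imm12, bits 11:0.  */
  fields |= (unsigned long) (imm16 & 0x0000f000) << 4;	/* imm4, bits 19:16.  */
  return fields;
}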
static void do_vfp_nsyn_opcode (const char *);

static int
do_vfp_nsyn_mrs (void)
{
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}

static int
do_vfp_nsyn_msr (void)
{
  if (inst.operands[0].isvec)
    do_vfp_nsyn_opcode ("fmxr");
  else
    return FAIL;

  return SUCCESS;
}

static void
do_mrs (void)
{
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
  constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
	      != (PSR_c|PSR_f),
	      _("'CPSR' or 'SPSR' expected"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
}

/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      inst.instruction |= INST_IMMEDIATE;
      inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
      inst.reloc.pc_rel = 0;
    }
}
static void
do_mul (void)
{
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}

/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

static void
do_mull (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}

static void
do_nop (void)
{
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
7693 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7694 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7695 Condition defaults to COND_ALWAYS.
7696 Error if Rd, Rn or Rm are R15. */
7701 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7702 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7703 inst
.instruction
|= inst
.operands
[2].reg
;
7704 if (inst
.operands
[3].present
)
7705 encode_arm_shift (3);
7708 /* ARM V6 PKHTB (Argument Parse). */
7713 if (!inst
.operands
[3].present
)
7715 /* If the shift specifier is omitted, turn the instruction
7716 into pkhbt rd, rm, rn. */
7717 inst
.instruction
&= 0xfff00010;
7718 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7719 inst
.instruction
|= inst
.operands
[1].reg
;
7720 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7724 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7725 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7726 inst
.instruction
|= inst
.operands
[2].reg
;
7727 encode_arm_shift (3);
7731 /* ARMv5TE: Preload-Cache
7735 Syntactically, like LDR with B=1, W=0, L=1. */
7740 constraint (!inst
.operands
[0].isreg
,
7741 _("'[' expected after PLD mnemonic"));
7742 constraint (inst
.operands
[0].postind
,
7743 _("post-indexed expression used in preload instruction"));
7744 constraint (inst
.operands
[0].writeback
,
7745 _("writeback used in preload instruction"));
7746 constraint (!inst
.operands
[0].preind
,
7747 _("unindexed addressing used in preload instruction"));
7748 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7751 /* ARMv7: PLI <addr_mode> */
7755 constraint (!inst
.operands
[0].isreg
,
7756 _("'[' expected after PLI mnemonic"));
7757 constraint (inst
.operands
[0].postind
,
7758 _("post-indexed expression used in preload instruction"));
7759 constraint (inst
.operands
[0].writeback
,
7760 _("writeback used in preload instruction"));
7761 constraint (!inst
.operands
[0].preind
,
7762 _("unindexed addressing used in preload instruction"));
7763 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7764 inst
.instruction
&= ~PRE_INDEX
;
7770 inst
.operands
[1] = inst
.operands
[0];
7771 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
7772 inst
.operands
[0].isreg
= 1;
7773 inst
.operands
[0].writeback
= 1;
7774 inst
.operands
[0].reg
= REG_SP
;
7778 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7779 word at the specified address and the following word
7781 Unconditionally executed.
7782 Error if Rn is R15. */
7787 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7788 if (inst
.operands
[0].writeback
)
7789 inst
.instruction
|= WRITE_BACK
;
7792 /* ARM V6 ssat (argument parse). */
7797 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7798 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
7799 inst
.instruction
|= inst
.operands
[2].reg
;
7801 if (inst
.operands
[3].present
)
7802 encode_arm_shift (3);
7805 /* ARM V6 usat (argument parse). */
7810 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7811 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7812 inst
.instruction
|= inst
.operands
[2].reg
;
7814 if (inst
.operands
[3].present
)
7815 encode_arm_shift (3);
7818 /* ARM V6 ssat16 (argument parse). */
7823 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7824 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
7825 inst
.instruction
|= inst
.operands
[2].reg
;
7831 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7832 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7833 inst
.instruction
|= inst
.operands
[2].reg
;
7836 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7837 preserving the other bits.
7839 setend <endian_specifier>, where <endian_specifier> is either
7845 if (inst
.operands
[0].imm
)
7846 inst
.instruction
|= 0x200;
7852 unsigned int Rm
= (inst
.operands
[1].present
7853 ? inst
.operands
[1].reg
7854 : inst
.operands
[0].reg
);
7856 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7857 inst
.instruction
|= Rm
;
7858 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
7860 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7861 inst
.instruction
|= SHIFT_BY_REG
;
7864 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7870 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
7871 inst
.reloc
.pc_rel
= 0;
7877 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
7878 inst
.reloc
.pc_rel
= 0;
7881 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7882 SMLAxy{cond} Rd,Rm,Rs,Rn
7883 SMLAWy{cond} Rd,Rm,Rs,Rn
7884 Error if any register is R15. */
7889 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7890 inst
.instruction
|= inst
.operands
[1].reg
;
7891 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7892 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7895 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7896 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7897 Error if any register is R15.
7898 Warning if Rdlo == Rdhi. */
7903 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7904 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7905 inst
.instruction
|= inst
.operands
[2].reg
;
7906 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
7908 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
7909 as_tsktsk (_("rdhi and rdlo must be different"));
7912 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7913 SMULxy{cond} Rd,Rm,Rs
7914 Error if any register is R15. */
7919 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7920 inst
.instruction
|= inst
.operands
[1].reg
;
7921 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7924 /* ARM V6 srs (argument parse). The variable fields in the encoding are
7925 the same for both ARM and Thumb-2. */
7932 if (inst
.operands
[0].present
)
7934 reg
= inst
.operands
[0].reg
;
7935 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
7940 inst
.instruction
|= reg
<< 16;
7941 inst
.instruction
|= inst
.operands
[1].imm
;
7942 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
7943 inst
.instruction
|= WRITE_BACK
;
7946 /* ARM V6 strex (argument parse). */
7951 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
7952 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
7953 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
7954 || inst
.operands
[2].negative
7955 /* See comment in do_ldrex(). */
7956 || (inst
.operands
[2].reg
== REG_PC
),
7959 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7960 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
7962 constraint (inst
.reloc
.exp
.X_op
!= O_constant
7963 || inst
.reloc
.exp
.X_add_number
!= 0,
7964 _("offset must be zero in ARM encoding"));
7966 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7967 inst
.instruction
|= inst
.operands
[1].reg
;
7968 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7969 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
7975 constraint (inst
.operands
[1].reg
% 2 != 0,
7976 _("even register required"));
7977 constraint (inst
.operands
[2].present
7978 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
7979 _("can only store two consecutive registers"));
7980 /* If op 2 were present and equal to PC, this function wouldn't
7981 have been called in the first place. */
7982 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
7984 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
7985 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
7986 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
7989 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7990 inst
.instruction
|= inst
.operands
[1].reg
;
7991 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
7994 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7995 extends it to 32-bits, and adds the result to a value in another
7996 register. You can specify a rotation by 0, 8, 16, or 24 bits
7997 before extracting the 16-bit value.
7998 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7999 Condition defaults to COND_ALWAYS.
8000 Error if any register uses R15. */
8005 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8006 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8007 inst
.instruction
|= inst
.operands
[2].reg
;
8008 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
8013 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8014 Condition defaults to COND_ALWAYS.
8015 Error if any register uses R15. */
8020 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8021 inst
.instruction
|= inst
.operands
[1].reg
;
8022 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
8025 /* VFP instructions. In a logical order: SP variant first, monad
8026 before dyad, arithmetic then move then load/store. */
8029 do_vfp_sp_monadic (void)
8031 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8032 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8036 do_vfp_sp_dyadic (void)
8038 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8039 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8040 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8044 do_vfp_sp_compare_z (void)
8046 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8050 do_vfp_dp_sp_cvt (void)
8052 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8053 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8057 do_vfp_sp_dp_cvt (void)
8059 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8060 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8064 do_vfp_reg_from_sp (void)
8066 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8067 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8071 do_vfp_reg2_from_sp2 (void)
8073 constraint (inst
.operands
[2].imm
!= 2,
8074 _("only two consecutive VFP SP registers allowed here"));
8075 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8076 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8077 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8081 do_vfp_sp_from_reg (void)
8083 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
8084 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8088 do_vfp_sp2_from_reg2 (void)
8090 constraint (inst
.operands
[0].imm
!= 2,
8091 _("only two consecutive VFP SP registers allowed here"));
8092 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
8093 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8094 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8098 do_vfp_sp_ldst (void)
8100 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8101 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8105 do_vfp_dp_ldst (void)
8107 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8108 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8113 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
8115 if (inst
.operands
[0].writeback
)
8116 inst
.instruction
|= WRITE_BACK
;
8118 constraint (ldstm_type
!= VFP_LDSTMIA
,
8119 _("this addressing mode requires base-register writeback"));
8120 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8121 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
8122 inst
.instruction
|= inst
.operands
[1].imm
;
8126 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
8130 if (inst
.operands
[0].writeback
)
8131 inst
.instruction
|= WRITE_BACK
;
8133 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
8134 _("this addressing mode requires base-register writeback"));
8136 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8137 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8139 count
= inst
.operands
[1].imm
<< 1;
8140 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
8143 inst
.instruction
|= count
;
8147 do_vfp_sp_ldstmia (void)
8149 vfp_sp_ldstm (VFP_LDSTMIA
);
8153 do_vfp_sp_ldstmdb (void)
8155 vfp_sp_ldstm (VFP_LDSTMDB
);
8159 do_vfp_dp_ldstmia (void)
8161 vfp_dp_ldstm (VFP_LDSTMIA
);
8165 do_vfp_dp_ldstmdb (void)
8167 vfp_dp_ldstm (VFP_LDSTMDB
);
8171 do_vfp_xp_ldstmia (void)
8173 vfp_dp_ldstm (VFP_LDSTMIAX
);
8177 do_vfp_xp_ldstmdb (void)
8179 vfp_dp_ldstm (VFP_LDSTMDBX
);
8183 do_vfp_dp_rd_rm (void)
8185 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8186 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8190 do_vfp_dp_rn_rd (void)
8192 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
8193 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8197 do_vfp_dp_rd_rn (void)
8199 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8200 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8204 do_vfp_dp_rd_rn_rm (void)
8206 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8207 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8208 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
8214 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8218 do_vfp_dp_rm_rd_rn (void)
8220 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
8221 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8222 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
8225 /* VFPv3 instructions. */
8227 do_vfp_sp_const (void)
8229 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8230 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8231 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8235 do_vfp_dp_const (void)
8237 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8238 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8239 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8243 vfp_conv (int srcsize
)
8245 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
8246 inst
.instruction
|= (immbits
& 1) << 5;
8247 inst
.instruction
|= (immbits
>> 1);
8251 do_vfp_sp_conv_16 (void)
8253 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8258 do_vfp_dp_conv_16 (void)
8260 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8265 do_vfp_sp_conv_32 (void)
8267 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8272 do_vfp_dp_conv_32 (void)
8274 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8278 /* FPA instructions. Also in a logical order. */
8283 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8284 inst
.instruction
|= inst
.operands
[1].reg
;
8288 do_fpa_ldmstm (void)
8290 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8291 switch (inst
.operands
[1].imm
)
8293 case 1: inst
.instruction
|= CP_T_X
; break;
8294 case 2: inst
.instruction
|= CP_T_Y
; break;
8295 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
8300 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
8302 /* The instruction specified "ea" or "fd", so we can only accept
8303 [Rn]{!}. The instruction does not really support stacking or
8304 unstacking, so we have to emulate these by setting appropriate
8305 bits and offsets. */
8306 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8307 || inst
.reloc
.exp
.X_add_number
!= 0,
8308 _("this instruction does not support indexing"));
8310 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
8311 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
8313 if (!(inst
.instruction
& INDEX_UP
))
8314 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
8316 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
8318 inst
.operands
[2].preind
= 0;
8319 inst
.operands
[2].postind
= 1;
8323 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8326 /* iWMMXt instructions: strictly in alphabetical order. */
8329 do_iwmmxt_tandorc (void)
8331 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
8335 do_iwmmxt_textrc (void)
8337 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8338 inst
.instruction
|= inst
.operands
[1].imm
;
8342 do_iwmmxt_textrm (void)
8344 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8345 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8346 inst
.instruction
|= inst
.operands
[2].imm
;
8350 do_iwmmxt_tinsr (void)
8352 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8353 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8354 inst
.instruction
|= inst
.operands
[2].imm
;
8358 do_iwmmxt_tmia (void)
8360 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8361 inst
.instruction
|= inst
.operands
[1].reg
;
8362 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8366 do_iwmmxt_waligni (void)
8368 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8369 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8370 inst
.instruction
|= inst
.operands
[2].reg
;
8371 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
8375 do_iwmmxt_wmerge (void)
8377 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8378 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8379 inst
.instruction
|= inst
.operands
[2].reg
;
8380 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
8384 do_iwmmxt_wmov (void)
8386 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8387 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8388 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8389 inst
.instruction
|= inst
.operands
[1].reg
;
8393 do_iwmmxt_wldstbh (void)
8396 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8398 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
8400 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
8401 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
8405 do_iwmmxt_wldstw (void)
8407 /* RIWR_RIWC clears .isreg for a control register. */
8408 if (!inst
.operands
[0].isreg
)
8410 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8411 inst
.instruction
|= 0xf0000000;
8414 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8415 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8419 do_iwmmxt_wldstd (void)
8421 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8422 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
8423 && inst
.operands
[1].immisreg
)
8425 inst
.instruction
&= ~0x1a000ff;
8426 inst
.instruction
|= (0xf << 28);
8427 if (inst
.operands
[1].preind
)
8428 inst
.instruction
|= PRE_INDEX
;
8429 if (!inst
.operands
[1].negative
)
8430 inst
.instruction
|= INDEX_UP
;
8431 if (inst
.operands
[1].writeback
)
8432 inst
.instruction
|= WRITE_BACK
;
8433 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8434 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8435 inst
.instruction
|= inst
.operands
[1].imm
;
8438 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
8442 do_iwmmxt_wshufh (void)
8444 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8445 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8446 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
8447 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
8451 do_iwmmxt_wzero (void)
8453 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8454 inst
.instruction
|= inst
.operands
[0].reg
;
8455 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8456 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8460 do_iwmmxt_wrwrwr_or_imm5 (void)
8462 if (inst
.operands
[2].isreg
)
8465 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
8466 _("immediate operand requires iWMMXt2"));
8468 if (inst
.operands
[2].imm
== 0)
8470 switch ((inst
.instruction
>> 20) & 0xf)
8476 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8477 inst
.operands
[2].imm
= 16;
8478 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
8484 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8485 inst
.operands
[2].imm
= 32;
8486 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
8493 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8495 wrn
= (inst
.instruction
>> 16) & 0xf;
8496 inst
.instruction
&= 0xff0fff0f;
8497 inst
.instruction
|= wrn
;
8498 /* Bail out here; the instruction is now assembled. */
8503 /* Map 32 -> 0, etc. */
8504 inst
.operands
[2].imm
&= 0x1f;
8505 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
8509 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8510 operations first, then control, shift, and load/store. */
8512 /* Insns like "foo X,Y,Z". */
8515 do_mav_triple (void)
8517 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8518 inst
.instruction
|= inst
.operands
[1].reg
;
8519 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8522 /* Insns like "foo W,X,Y,Z".
8523 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8528 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8529 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8530 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8531 inst
.instruction
|= inst
.operands
[3].reg
;
8534 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8538 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8541 /* Maverick shift immediate instructions.
8542 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8543 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8548 int imm
= inst
.operands
[2].imm
;
8550 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8551 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8553 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8554 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8555 Bit 4 should be 0. */
8556 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
8558 inst
.instruction
|= imm
;
8561 /* XScale instructions. Also sorted arithmetic before move. */
8563 /* Xscale multiply-accumulate (argument parse)
8566 MIAxycc acc0,Rm,Rs. */
8571 inst
.instruction
|= inst
.operands
[1].reg
;
8572 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8575 /* Xscale move-accumulator-register (argument parse)
8577 MARcc acc0,RdLo,RdHi. */
8582 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8583 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8586 /* Xscale move-register-accumulator (argument parse)
8588 MRAcc RdLo,RdHi,acc0. */
8593 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
8594 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8595 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8598 /* Encoding functions relevant only to Thumb. */
8600 /* inst.operands[i] is a shifted-register operand; encode
8601 it into inst.instruction in the format used by Thumb32. */
8604 encode_thumb32_shifted_operand (int i
)
8606 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
8607 unsigned int shift
= inst
.operands
[i
].shift_kind
;
8609 constraint (inst
.operands
[i
].immisreg
,
8610 _("shift by register not allowed in thumb mode"));
8611 inst
.instruction
|= inst
.operands
[i
].reg
;
8612 if (shift
== SHIFT_RRX
)
8613 inst
.instruction
|= SHIFT_ROR
<< 4;
8616 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8617 _("expression too complex"));
8619 constraint (value
> 32
8620 || (value
== 32 && (shift
== SHIFT_LSL
8621 || shift
== SHIFT_ROR
)),
8622 _("shift expression is too large"));
8626 else if (value
== 32)
8629 inst
.instruction
|= shift
<< 4;
8630 inst
.instruction
|= (value
& 0x1c) << 10;
8631 inst
.instruction
|= (value
& 0x03) << 6;
8636 /* inst.operands[i] was set up by parse_address. Encode it into a
8637 Thumb32 format load or store instruction. Reject forms that cannot
8638 be used with such instructions. If is_t is true, reject forms that
8639 cannot be used with a T instruction; if is_d is true, reject forms
8640 that cannot be used with a D instruction. */
8643 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
8645 bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8647 constraint (!inst
.operands
[i
].isreg
,
8648 _("Instruction does not support =N addresses"));
8650 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8651 if (inst
.operands
[i
].immisreg
)
8653 constraint (is_pc
, _("cannot use register index with PC-relative addressing"));
8654 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
8655 constraint (inst
.operands
[i
].negative
,
8656 _("Thumb does not support negative register indexing"));
8657 constraint (inst
.operands
[i
].postind
,
8658 _("Thumb does not support register post-indexing"));
8659 constraint (inst
.operands
[i
].writeback
,
8660 _("Thumb does not support register indexing with writeback"));
8661 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
8662 _("Thumb supports only LSL in shifted register indexing"));
8664 inst
.instruction
|= inst
.operands
[i
].imm
;
8665 if (inst
.operands
[i
].shifted
)
8667 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8668 _("expression too complex"));
8669 constraint (inst
.reloc
.exp
.X_add_number
< 0
8670 || inst
.reloc
.exp
.X_add_number
> 3,
8671 _("shift out of range"));
8672 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8674 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8676 else if (inst
.operands
[i
].preind
)
8678 constraint (is_pc
&& inst
.operands
[i
].writeback
,
8679 _("cannot use writeback with PC-relative addressing"));
8680 constraint (is_t
&& inst
.operands
[i
].writeback
,
8681 _("cannot use writeback with this instruction"));
8685 inst
.instruction
|= 0x01000000;
8686 if (inst
.operands
[i
].writeback
)
8687 inst
.instruction
|= 0x00200000;
8691 inst
.instruction
|= 0x00000c00;
8692 if (inst
.operands
[i
].writeback
)
8693 inst
.instruction
|= 0x00000100;
8695 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8697 else if (inst
.operands
[i
].postind
)
8699 gas_assert (inst
.operands
[i
].writeback
);
8700 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
8701 constraint (is_t
, _("cannot use post-indexing with this instruction"));
8704 inst
.instruction
|= 0x00200000;
8706 inst
.instruction
|= 0x00000900;
8707 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8709 else /* unindexed - only for coprocessor */
8710 inst
.error
= _("instruction does not accept unindexed addressing");
8713 /* Table of Thumb instructions which exist in both 16- and 32-bit
8714 encodings (the latter only in post-V6T2 cores). The index is the
8715 value used in the insns table below. When there is more than one
8716 possible 16-bit encoding for the instruction, this table always
8718 Also contains several pseudo-instructions used during relaxation. */
8719 #define T16_32_TAB \
8720 X(adc, 4140, eb400000), \
8721 X(adcs, 4140, eb500000), \
8722 X(add, 1c00, eb000000), \
8723 X(adds, 1c00, eb100000), \
8724 X(addi, 0000, f1000000), \
8725 X(addis, 0000, f1100000), \
8726 X(add_pc,000f, f20f0000), \
8727 X(add_sp,000d, f10d0000), \
8728 X(adr, 000f, f20f0000), \
8729 X(and, 4000, ea000000), \
8730 X(ands, 4000, ea100000), \
8731 X(asr, 1000, fa40f000), \
8732 X(asrs, 1000, fa50f000), \
8733 X(b, e000, f000b000), \
8734 X(bcond, d000, f0008000), \
8735 X(bic, 4380, ea200000), \
8736 X(bics, 4380, ea300000), \
8737 X(cmn, 42c0, eb100f00), \
8738 X(cmp, 2800, ebb00f00), \
8739 X(cpsie, b660, f3af8400), \
8740 X(cpsid, b670, f3af8600), \
8741 X(cpy, 4600, ea4f0000), \
8742 X(dec_sp,80dd, f1ad0d00), \
8743 X(eor, 4040, ea800000), \
8744 X(eors, 4040, ea900000), \
8745 X(inc_sp,00dd, f10d0d00), \
8746 X(ldmia, c800, e8900000), \
8747 X(ldr, 6800, f8500000), \
8748 X(ldrb, 7800, f8100000), \
8749 X(ldrh, 8800, f8300000), \
8750 X(ldrsb, 5600, f9100000), \
8751 X(ldrsh, 5e00, f9300000), \
8752 X(ldr_pc,4800, f85f0000), \
8753 X(ldr_pc2,4800, f85f0000), \
8754 X(ldr_sp,9800, f85d0000), \
8755 X(lsl, 0000, fa00f000), \
8756 X(lsls, 0000, fa10f000), \
8757 X(lsr, 0800, fa20f000), \
8758 X(lsrs, 0800, fa30f000), \
8759 X(mov, 2000, ea4f0000), \
8760 X(movs, 2000, ea5f0000), \
8761 X(mul, 4340, fb00f000), \
8762 X(muls, 4340, ffffffff), /* no 32b muls */ \
8763 X(mvn, 43c0, ea6f0000), \
8764 X(mvns, 43c0, ea7f0000), \
8765 X(neg, 4240, f1c00000), /* rsb #0 */ \
8766 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8767 X(orr, 4300, ea400000), \
8768 X(orrs, 4300, ea500000), \
8769 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8770 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8771 X(rev, ba00, fa90f080), \
8772 X(rev16, ba40, fa90f090), \
8773 X(revsh, bac0, fa90f0b0), \
8774 X(ror, 41c0, fa60f000), \
8775 X(rors, 41c0, fa70f000), \
8776 X(sbc, 4180, eb600000), \
8777 X(sbcs, 4180, eb700000), \
8778 X(stmia, c000, e8800000), \
8779 X(str, 6000, f8400000), \
8780 X(strb, 7000, f8000000), \
8781 X(strh, 8000, f8200000), \
8782 X(str_sp,9000, f84d0000), \
8783 X(sub, 1e00, eba00000), \
8784 X(subs, 1e00, ebb00000), \
8785 X(subi, 8000, f1a00000), \
8786 X(subis, 8000, f1b00000), \
8787 X(sxtb, b240, fa4ff080), \
8788 X(sxth, b200, fa0ff080), \
8789 X(tst, 4200, ea100f00), \
8790 X(uxtb, b2c0, fa5ff080), \
8791 X(uxth, b280, fa1ff080), \
8792 X(nop, bf00, f3af8000), \
8793 X(yield, bf10, f3af8001), \
8794 X(wfe, bf20, f3af8002), \
8795 X(wfi, bf30, f3af8003), \
8796 X(sev, bf40, f3af8004),
8798 /* To catch errors in encoding functions, the codes are all offset by
8799 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8800 as 16-bit instructions. */
8801 #define X(a,b,c) T_MNEM_##a
8802 enum t16_32_codes
{ T16_32_OFFSET
= 0xF7FF, T16_32_TAB
};
8805 #define X(a,b,c) 0x##b
8806 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
8807 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8810 #define X(a,b,c) 0x##c
8811 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
8812 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8813 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8817 /* Thumb instruction encoders, in alphabetical order. */
8822 do_t_add_sub_w (void)
8826 Rd
= inst
.operands
[0].reg
;
8827 Rn
= inst
.operands
[1].reg
;
8829 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
8830 is the SP-{plus,minus}-immediate form of the instruction. */
8832 constraint (Rd
== REG_PC
, BAD_PC
);
8834 reject_bad_reg (Rd
);
8836 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
8837 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8840 /* Parse an add or subtract instruction. We get here with inst.instruction
8841 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8848 Rd
= inst
.operands
[0].reg
;
8849 Rs
= (inst
.operands
[1].present
8850 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8851 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8854 set_it_insn_type_last ();
8862 flags
= (inst
.instruction
== T_MNEM_adds
8863 || inst
.instruction
== T_MNEM_subs
);
8865 narrow
= !in_it_block ();
8867 narrow
= in_it_block ();
8868 if (!inst
.operands
[2].isreg
)
8872 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
8874 add
= (inst
.instruction
== T_MNEM_add
8875 || inst
.instruction
== T_MNEM_adds
);
8877 if (inst
.size_req
!= 4)
8879 /* Attempt to use a narrow opcode, with relaxation if
8881 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
8882 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
8883 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
8884 opcode
= T_MNEM_add_sp
;
8885 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
8886 opcode
= T_MNEM_add_pc
;
8887 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
8890 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
8892 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
8896 inst
.instruction
= THUMB_OP16(opcode
);
8897 inst
.instruction
|= (Rd
<< 4) | Rs
;
8898 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
8899 if (inst
.size_req
!= 2)
8900 inst
.relax
= opcode
;
8903 constraint (inst
.size_req
== 2, BAD_HIREG
);
8905 if (inst
.size_req
== 4
8906 || (inst
.size_req
!= 2 && !opcode
))
8910 constraint (add
, BAD_PC
);
8911 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
8912 _("only SUBS PC, LR, #const allowed"));
8913 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8914 _("expression too complex"));
8915 constraint (inst
.reloc
.exp
.X_add_number
< 0
8916 || inst
.reloc
.exp
.X_add_number
> 0xff,
8917 _("immediate value out of range"));
8918 inst
.instruction
= T2_SUBS_PC_LR
8919 | inst
.reloc
.exp
.X_add_number
;
8920 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8923 else if (Rs
== REG_PC
)
8925 /* Always use addw/subw. */
8926 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
8927 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8931 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8932 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
8935 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
8937 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
8939 inst
.instruction
|= Rd
<< 8;
8940 inst
.instruction
|= Rs
<< 16;
8945 Rn
= inst
.operands
[2].reg
;
8946 /* See if we can do this with a 16-bit instruction. */
8947 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
8949 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
8954 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
8955 || inst
.instruction
== T_MNEM_add
)
8958 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
8962 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
8964 /* Thumb-1 cores (except v6-M) require at least one high
8965 register in a narrow non flag setting add. */
8966 if (Rd
> 7 || Rn
> 7
8967 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
8968 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
8975 inst
.instruction
= T_OPCODE_ADD_HI
;
8976 inst
.instruction
|= (Rd
& 8) << 4;
8977 inst
.instruction
|= (Rd
& 7);
8978 inst
.instruction
|= Rn
<< 3;
8984 constraint (Rd
== REG_PC
, BAD_PC
);
8985 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
8986 constraint (Rs
== REG_PC
, BAD_PC
);
8987 reject_bad_reg (Rn
);
8989 /* If we get here, it can't be done in 16 bits. */
8990 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
8991 _("shift must be constant"));
8992 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
8993 inst
.instruction
|= Rd
<< 8;
8994 inst
.instruction
|= Rs
<< 16;
8995 encode_thumb32_shifted_operand (2);
9000 constraint (inst
.instruction
== T_MNEM_adds
9001 || inst
.instruction
== T_MNEM_subs
,
9004 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
9006 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
9007 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
9010 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9012 inst
.instruction
|= (Rd
<< 4) | Rs
;
9013 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9017 Rn
= inst
.operands
[2].reg
;
9018 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
9020 /* We now have Rd, Rs, and Rn set to registers. */
9021 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9023 /* Can't do this for SUB. */
9024 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
9025 inst
.instruction
= T_OPCODE_ADD_HI
;
9026 inst
.instruction
|= (Rd
& 8) << 4;
9027 inst
.instruction
|= (Rd
& 7);
9029 inst
.instruction
|= Rn
<< 3;
9031 inst
.instruction
|= Rs
<< 3;
9033 constraint (1, _("dest must overlap one source register"));
9037 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9038 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
9039 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9049 Rd
= inst
.operands
[0].reg
;
9050 reject_bad_reg (Rd
);
9052 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
9054 /* Defer to section relaxation. */
9055 inst
.relax
= inst
.instruction
;
9056 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9057 inst
.instruction
|= Rd
<< 4;
9059 else if (unified_syntax
&& inst
.size_req
!= 2)
9061 /* Generate a 32-bit opcode. */
9062 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9063 inst
.instruction
|= Rd
<< 8;
9064 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
9065 inst
.reloc
.pc_rel
= 1;
9069 /* Generate a 16-bit opcode. */
9070 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9071 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9072 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
9073 inst
.reloc
.pc_rel
= 1;
9075 inst
.instruction
|= Rd
<< 4;
9079 /* Arithmetic instructions for which there is just one 16-bit
9080 instruction encoding, and it allows only two low registers.
9081 For maximal compatibility with ARM syntax, we allow three register
9082 operands even when Thumb-32 instructions are not available, as long
9083 as the first two are identical. For instance, both "sbc r0,r1" and
9084 "sbc r0,r0,r1" are allowed. */
9090 Rd
= inst
.operands
[0].reg
;
9091 Rs
= (inst
.operands
[1].present
9092 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9093 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9094 Rn
= inst
.operands
[2].reg
;
9096 reject_bad_reg (Rd
);
9097 reject_bad_reg (Rs
);
9098 if (inst
.operands
[2].isreg
)
9099 reject_bad_reg (Rn
);
9103 if (!inst
.operands
[2].isreg
)
9105 /* For an immediate, we always generate a 32-bit opcode;
9106 section relaxation will shrink it later if possible. */
9107 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9108 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9109 inst
.instruction
|= Rd
<< 8;
9110 inst
.instruction
|= Rs
<< 16;
9111 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9117 /* See if we can do this with a 16-bit instruction. */
9118 if (THUMB_SETS_FLAGS (inst
.instruction
))
9119 narrow
= !in_it_block ();
9121 narrow
= in_it_block ();
9123 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9125 if (inst
.operands
[2].shifted
)
9127 if (inst
.size_req
== 4)
9133 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9134 inst
.instruction
|= Rd
;
9135 inst
.instruction
|= Rn
<< 3;
9139 /* If we get here, it can't be done in 16 bits. */
9140 constraint (inst
.operands
[2].shifted
9141 && inst
.operands
[2].immisreg
,
9142 _("shift must be constant"));
9143 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9144 inst
.instruction
|= Rd
<< 8;
9145 inst
.instruction
|= Rs
<< 16;
9146 encode_thumb32_shifted_operand (2);
9151 /* On its face this is a lie - the instruction does set the
9152 flags. However, the only supported mnemonic in this mode
9154 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9156 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9157 _("unshifted register required"));
9158 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9159 constraint (Rd
!= Rs
,
9160 _("dest and source1 must be the same register"));
9162 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9163 inst
.instruction
|= Rd
;
9164 inst
.instruction
|= Rn
<< 3;
9168 /* Similarly, but for instructions where the arithmetic operation is
9169 commutative, so we can allow either of them to be different from
9170 the destination operand in a 16-bit instruction. For instance, all
9171 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9178 Rd
= inst
.operands
[0].reg
;
9179 Rs
= (inst
.operands
[1].present
9180 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9181 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9182 Rn
= inst
.operands
[2].reg
;
9184 reject_bad_reg (Rd
);
9185 reject_bad_reg (Rs
);
9186 if (inst
.operands
[2].isreg
)
9187 reject_bad_reg (Rn
);
9191 if (!inst
.operands
[2].isreg
)
9193 /* For an immediate, we always generate a 32-bit opcode;
9194 section relaxation will shrink it later if possible. */
9195 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9196 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9197 inst
.instruction
|= Rd
<< 8;
9198 inst
.instruction
|= Rs
<< 16;
9199 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9205 /* See if we can do this with a 16-bit instruction. */
9206 if (THUMB_SETS_FLAGS (inst
.instruction
))
9207 narrow
= !in_it_block ();
9209 narrow
= in_it_block ();
9211 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9213 if (inst
.operands
[2].shifted
)
9215 if (inst
.size_req
== 4)
9222 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9223 inst
.instruction
|= Rd
;
9224 inst
.instruction
|= Rn
<< 3;
9229 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9230 inst
.instruction
|= Rd
;
9231 inst
.instruction
|= Rs
<< 3;
9236 /* If we get here, it can't be done in 16 bits. */
9237 constraint (inst
.operands
[2].shifted
9238 && inst
.operands
[2].immisreg
,
9239 _("shift must be constant"));
9240 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9241 inst
.instruction
|= Rd
<< 8;
9242 inst
.instruction
|= Rs
<< 16;
9243 encode_thumb32_shifted_operand (2);
9248 /* On its face this is a lie - the instruction does set the
9249 flags. However, the only supported mnemonic in this mode
9251 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9253 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9254 _("unshifted register required"));
9255 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9257 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9258 inst
.instruction
|= Rd
;
9261 inst
.instruction
|= Rn
<< 3;
9263 inst
.instruction
|= Rs
<< 3;
9265 constraint (1, _("dest must overlap one source register"));
9272 if (inst
.operands
[0].present
)
9274 constraint ((inst
.instruction
& 0xf0) != 0x40
9275 && inst
.operands
[0].imm
!= 0xf,
9276 _("bad barrier type"));
9277 inst
.instruction
|= inst
.operands
[0].imm
;
9280 inst
.instruction
|= 0xf;
9287 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
9288 constraint (msb
> 32, _("bit-field extends past end of register"));
9289 /* The instruction encoding stores the LSB and MSB,
9290 not the LSB and width. */
9291 Rd
= inst
.operands
[0].reg
;
9292 reject_bad_reg (Rd
);
9293 inst
.instruction
|= Rd
<< 8;
9294 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
9295 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
9296 inst
.instruction
|= msb
- 1;
9305 Rd
= inst
.operands
[0].reg
;
9306 reject_bad_reg (Rd
);
9308 /* #0 in second position is alternative syntax for bfc, which is
9309 the same instruction but with REG_PC in the Rm field. */
9310 if (!inst
.operands
[1].isreg
)
9314 Rn
= inst
.operands
[1].reg
;
9315 reject_bad_reg (Rn
);
9318 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
9319 constraint (msb
> 32, _("bit-field extends past end of register"));
9320 /* The instruction encoding stores the LSB and MSB,
9321 not the LSB and width. */
9322 inst
.instruction
|= Rd
<< 8;
9323 inst
.instruction
|= Rn
<< 16;
9324 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9325 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9326 inst
.instruction
|= msb
- 1;
9334 Rd
= inst
.operands
[0].reg
;
9335 Rn
= inst
.operands
[1].reg
;
9337 reject_bad_reg (Rd
);
9338 reject_bad_reg (Rn
);
9340 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
9341 _("bit-field extends past end of register"));
9342 inst
.instruction
|= Rd
<< 8;
9343 inst
.instruction
|= Rn
<< 16;
9344 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9345 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9346 inst
.instruction
|= inst
.operands
[3].imm
- 1;
9349 /* ARM V5 Thumb BLX (argument parse)
9350 BLX <target_addr> which is BLX(1)
9351 BLX <Rm> which is BLX(2)
9352 Unfortunately, there are two different opcodes for this mnemonic.
9353 So, the insns[].value is not used, and the code here zaps values
9354 into inst.instruction.
9356 ??? How to take advantage of the additional two bits of displacement
9357 available in Thumb32 mode? Need new relocation? */
9362 set_it_insn_type_last ();
9364 if (inst
.operands
[0].isreg
)
9366 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9367 /* We have a register, so this is BLX(2). */
9368 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9372 /* No register. This must be BLX(1). */
9373 inst
.instruction
= 0xf000e800;
9374 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
9375 inst
.reloc
.pc_rel
= 1;
9386 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
9390 /* Conditional branches inside IT blocks are encoded as unconditional
9397 if (cond
!= COND_ALWAYS
)
9398 opcode
= T_MNEM_bcond
;
9400 opcode
= inst
.instruction
;
9402 if (unified_syntax
&& inst
.size_req
== 4)
9404 inst
.instruction
= THUMB_OP32(opcode
);
9405 if (cond
== COND_ALWAYS
)
9406 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
9409 gas_assert (cond
!= 0xF);
9410 inst
.instruction
|= cond
<< 22;
9411 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
9416 inst
.instruction
= THUMB_OP16(opcode
);
9417 if (cond
== COND_ALWAYS
)
9418 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
9421 inst
.instruction
|= cond
<< 8;
9422 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
9424 /* Allow section relaxation. */
9425 if (unified_syntax
&& inst
.size_req
!= 2)
9426 inst
.relax
= opcode
;
9429 inst
.reloc
.pc_rel
= 1;
9435 constraint (inst
.cond
!= COND_ALWAYS
,
9436 _("instruction is always unconditional"));
9437 if (inst
.operands
[0].present
)
9439 constraint (inst
.operands
[0].imm
> 255,
9440 _("immediate value out of range"));
9441 inst
.instruction
|= inst
.operands
[0].imm
;
9442 set_it_insn_type (NEUTRAL_IT_INSN
);
9447 do_t_branch23 (void)
9449 set_it_insn_type_last ();
9450 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
9451 inst
.reloc
.pc_rel
= 1;
9453 #if defined(OBJ_COFF)
9454 /* If the destination of the branch is a defined symbol which does not have
9455 the THUMB_FUNC attribute, then we must be calling a function which has
9456 the (interfacearm) attribute. We look for the Thumb entry point to that
9457 function and change the branch to refer to that function instead. */
9458 if ( inst
.reloc
.exp
.X_op
== O_symbol
9459 && inst
.reloc
.exp
.X_add_symbol
!= NULL
9460 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
9461 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
9462 inst
.reloc
.exp
.X_add_symbol
=
9463 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
9470 set_it_insn_type_last ();
9471 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9472 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9473 should cause the alignment to be checked once it is known. This is
9474 because BX PC only works if the instruction is word aligned. */
9482 set_it_insn_type_last ();
9483 Rm
= inst
.operands
[0].reg
;
9484 reject_bad_reg (Rm
);
9485 inst
.instruction
|= Rm
<< 16;
9494 Rd
= inst
.operands
[0].reg
;
9495 Rm
= inst
.operands
[1].reg
;
9497 reject_bad_reg (Rd
);
9498 reject_bad_reg (Rm
);
9500 inst
.instruction
|= Rd
<< 8;
9501 inst
.instruction
|= Rm
<< 16;
9502 inst
.instruction
|= Rm
;
9508 set_it_insn_type (OUTSIDE_IT_INSN
);
9509 inst
.instruction
|= inst
.operands
[0].imm
;
9515 set_it_insn_type (OUTSIDE_IT_INSN
);
9517 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
9518 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
9520 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
9521 inst
.instruction
= 0xf3af8000;
9522 inst
.instruction
|= imod
<< 9;
9523 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
9524 if (inst
.operands
[1].present
)
9525 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
9529 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
9530 && (inst
.operands
[0].imm
& 4),
9531 _("selected processor does not support 'A' form "
9532 "of this instruction"));
9533 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
9534 _("Thumb does not support the 2-argument "
9535 "form of this instruction"));
9536 inst
.instruction
|= inst
.operands
[0].imm
;
9540 /* THUMB CPY instruction (argument parse). */
9545 if (inst
.size_req
== 4)
9547 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
9548 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9549 inst
.instruction
|= inst
.operands
[1].reg
;
9553 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9554 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9555 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9562 set_it_insn_type (OUTSIDE_IT_INSN
);
9563 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9564 inst
.instruction
|= inst
.operands
[0].reg
;
9565 inst
.reloc
.pc_rel
= 1;
9566 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
9572 inst
.instruction
|= inst
.operands
[0].imm
;
9578 unsigned Rd
, Rn
, Rm
;
9580 Rd
= inst
.operands
[0].reg
;
9581 Rn
= (inst
.operands
[1].present
9582 ? inst
.operands
[1].reg
: Rd
);
9583 Rm
= inst
.operands
[2].reg
;
9585 reject_bad_reg (Rd
);
9586 reject_bad_reg (Rn
);
9587 reject_bad_reg (Rm
);
9589 inst
.instruction
|= Rd
<< 8;
9590 inst
.instruction
|= Rn
<< 16;
9591 inst
.instruction
|= Rm
;
9597 if (unified_syntax
&& inst
.size_req
== 4)
9598 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9600 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9606 unsigned int cond
= inst
.operands
[0].imm
;
9608 set_it_insn_type (IT_INSN
);
9609 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9612 /* If the condition is a negative condition, invert the mask. */
9613 if ((cond
& 0x1) == 0x0)
9615 unsigned int mask
= inst
.instruction
& 0x000f;
9617 if ((mask
& 0x7) == 0)
9618 /* no conversion needed */;
9619 else if ((mask
& 0x3) == 0)
9621 else if ((mask
& 0x1) == 0)
9626 inst
.instruction
&= 0xfff0;
9627 inst
.instruction
|= mask
;
9630 inst
.instruction
|= cond
<< 4;
9633 /* Helper function used for both push/pop and ldm/stm. */
9635 encode_thumb2_ldmstm (int base
, unsigned mask
, bfd_boolean writeback
)
9639 load
= (inst
.instruction
& (1 << 20)) != 0;
9641 if (mask
& (1 << 13))
9642 inst
.error
= _("SP not allowed in register list");
9645 if (mask
& (1 << 15))
9647 if (mask
& (1 << 14))
9648 inst
.error
= _("LR and PC should not both be in register list");
9650 set_it_insn_type_last ();
9653 if ((mask
& (1 << base
)) != 0
9655 as_warn (_("base register should not be in register list "
9656 "when written back"));
9660 if (mask
& (1 << 15))
9661 inst
.error
= _("PC not allowed in register list");
9663 if (mask
& (1 << base
))
9664 as_warn (_("value stored for r%d is UNPREDICTABLE"), base
);
9667 if ((mask
& (mask
- 1)) == 0)
9669 /* Single register transfers implemented as str/ldr. */
9672 if (inst
.instruction
& (1 << 23))
9673 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
9675 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
9679 if (inst
.instruction
& (1 << 23))
9680 inst
.instruction
= 0x00800000; /* ia -> [base] */
9682 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
9685 inst
.instruction
|= 0xf8400000;
9687 inst
.instruction
|= 0x00100000;
9689 mask
= ffs (mask
) - 1;
9693 inst
.instruction
|= WRITE_BACK
;
9695 inst
.instruction
|= mask
;
9696 inst
.instruction
|= base
<< 16;
9702 /* This really doesn't seem worth it. */
9703 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9704 _("expression too complex"));
9705 constraint (inst
.operands
[1].writeback
,
9706 _("Thumb load/store multiple does not support {reglist}^"));
9714 /* See if we can use a 16-bit instruction. */
9715 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9716 && inst
.size_req
!= 4
9717 && !(inst
.operands
[1].imm
& ~0xff))
9719 mask
= 1 << inst
.operands
[0].reg
;
9721 if (inst
.operands
[0].reg
<= 7
9722 && (inst
.instruction
== T_MNEM_stmia
9723 ? inst
.operands
[0].writeback
9724 : (inst
.operands
[0].writeback
9725 == !(inst
.operands
[1].imm
& mask
))))
9727 if (inst
.instruction
== T_MNEM_stmia
9728 && (inst
.operands
[1].imm
& mask
)
9729 && (inst
.operands
[1].imm
& (mask
- 1)))
9730 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9731 inst
.operands
[0].reg
);
9733 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9734 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9735 inst
.instruction
|= inst
.operands
[1].imm
;
9738 else if (inst
.operands
[0] .reg
== REG_SP
9739 && inst
.operands
[0].writeback
)
9741 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
9742 ? T_MNEM_push
: T_MNEM_pop
);
9743 inst
.instruction
|= inst
.operands
[1].imm
;
9750 if (inst
.instruction
< 0xffff)
9751 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9753 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
9754 inst
.operands
[0].writeback
);
9759 constraint (inst
.operands
[0].reg
> 7
9760 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9761 constraint (inst
.instruction
!= T_MNEM_ldmia
9762 && inst
.instruction
!= T_MNEM_stmia
,
9763 _("Thumb-2 instruction only valid in unified syntax"));
9764 if (inst
.instruction
== T_MNEM_stmia
)
9766 if (!inst
.operands
[0].writeback
)
9767 as_warn (_("this instruction will write back the base register"));
9768 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9769 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9770 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9771 inst
.operands
[0].reg
);
9775 if (!inst
.operands
[0].writeback
9776 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9777 as_warn (_("this instruction will write back the base register"));
9778 else if (inst
.operands
[0].writeback
9779 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9780 as_warn (_("this instruction will not write back the base register"));
9783 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9784 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9785 inst
.instruction
|= inst
.operands
[1].imm
;
9792 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9793 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9794 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9795 || inst
.operands
[1].negative
,
9798 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9799 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9800 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9806 if (!inst
.operands
[1].present
)
9808 constraint (inst
.operands
[0].reg
== REG_LR
,
9809 _("r14 not allowed as first register "
9810 "when second register is omitted"));
9811 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9813 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
9816 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9817 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9818 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_it_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
        {
          if (opcode <= 0xffff)
            inst.instruction = THUMB_OP32 (opcode);
          if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
            return;
        }
      if (inst.operands[1].isreg
          && !inst.operands[1].writeback
          && !inst.operands[1].shifted && !inst.operands[1].postind
          && !inst.operands[1].negative && inst.operands[0].reg <= 7
          && opcode <= 0xffff
          && inst.size_req != 4)
        {
          /* Insn may have a 16-bit form.  */
          Rn = inst.operands[1].reg;
          if (inst.operands[1].immisreg)
            {
              inst.instruction = THUMB_OP16 (opcode);

              if (Rn <= 7 && inst.operands[1].imm <= 7)
                goto op16;
            }
          else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
                    && opcode != T_MNEM_ldrsb)
                   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
                   || (Rn == REG_SP && opcode == T_MNEM_str))
            {
              /* [Rn, #const] */
              if (Rn > 7)
                {
                  if (Rn == REG_PC)
                    {
                      if (inst.reloc.pc_rel)
                        opcode = T_MNEM_ldr_pc2;
                      else
                        opcode = T_MNEM_ldr_pc;
                    }
                  else
                    {
                      if (opcode == T_MNEM_ldr)
                        opcode = T_MNEM_ldr_sp;
                      else
                        opcode = T_MNEM_str_sp;
                    }
                  inst.instruction = inst.operands[0].reg << 8;
                }
              else
                {
                  inst.instruction = inst.operands[0].reg;
                  inst.instruction |= inst.operands[1].reg << 3;
                }
              inst.instruction |= THUMB_OP16 (opcode);
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
              else
                inst.relax = opcode;
              return;
            }
        }
      /* Definitely a 32-bit variant.  */
      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      return;
    }

  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
                  || inst.operands[1].postind || inst.operands[1].shifted
                  || inst.operands[1].negative,
                  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
              || inst.operands[1].shifted
              || inst.operands[1].writeback,
              _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      constraint (inst.instruction & 0x0600,
                  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
                  && !(inst.instruction & THUMB_LOAD_BIT),
                  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
                  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
        inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
        inst.instruction = T_OPCODE_LDR_SP;
      else
        inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
              _("Thumb does not support this addressing mode"));

 op16:
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}

static void
do_t_ldstd (void)
{
  if (!inst.operands[1].present)
    {
      inst.operands[1].reg = inst.operands[0].reg + 1;
      constraint (inst.operands[0].reg == REG_LR,
                  _("r14 not allowed here"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 8;
  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
}

static void
do_t_ldstt (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
}
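
/* The next two encoders handle the Thumb-2 multiply-accumulate forms:
   MLA/MLS take Rd, Rn, Rm, Ra, while the long variants write an
   RdLo/RdHi pair.  reject_bad_reg filters out SP and PC, which are not
   valid in any of these operand positions.  */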
static void
do_t_mla (void)
{
  unsigned Rd, Rn, Rm, Ra;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;
  Ra = inst.operands[3].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);
  reject_bad_reg (Ra);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= Ra << 12;
}

static void
do_t_mlal (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
}
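
/* MOV, MOVS and CMP share one encoder because they accept the same
   operand shapes; r0off records whether the first register belongs in
   bits 8-11 (MOV/MOVS) or bits 16-19 (CMP) of the 32-bit encoding.  */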
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (Rn == REG_PC)
    set_it_insn_type_last ();

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mov
                   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      if (in_it_block ())
        narrow = opcode != T_MNEM_movs;
      else
        narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
          || inst.operands[1].shifted)
        narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
          && !inst.operands[1].shifted
          && Rn == REG_PC
          && Rm == REG_LR)
        {
          inst.instruction = T2_SUBS_PC_LR;
          return;
        }

      if (opcode == T_MNEM_cmp)
        {
          constraint (Rn == REG_PC, BAD_PC);
          if (narrow)
            {
              /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
                 but valid.  */
              warn_deprecated_sp (Rm);
              /* R15 was documented as a valid choice for Rm in ARMv6,
                 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
                 tools reject R15, so we do too.  */
              constraint (Rm == REG_PC, BAD_PC);
            }
          else
            reject_bad_reg (Rm);
        }
      else if (opcode == T_MNEM_mov
               || opcode == T_MNEM_movs)
        {
          if (inst.operands[1].isreg)
            {
              if (opcode == T_MNEM_movs)
                {
                  reject_bad_reg (Rn);
                  reject_bad_reg (Rm);
                }
              else if ((Rn == REG_SP || Rn == REG_PC)
                       && (Rm == REG_SP || Rm == REG_PC))
                reject_bad_reg (Rm);
            }
          else
            reject_bad_reg (Rn);
        }

      if (!inst.operands[1].isreg)
        {
          /* Immediate operand.  */
          if (!in_it_block () && opcode == T_MNEM_mov)
            narrow = 0;
          if (low_regs && narrow)
            {
              inst.instruction = THUMB_OP16 (opcode);
              inst.instruction |= Rn << 8;
              if (inst.size_req == 2)
                inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
              else
                inst.relax = opcode;
            }
          else
            {
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
              inst.instruction |= Rn << r0off;
              inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
            }
        }
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
               && (inst.instruction == T_MNEM_mov
                   || inst.instruction == T_MNEM_movs))
        {
          /* Register shifts are encoded as separate shift instructions.  */
          bfd_boolean flags = (inst.instruction == T_MNEM_movs);

          if (in_it_block ())
            narrow = !flags;
          else
            narrow = flags;

          if (inst.size_req == 4)
            narrow = FALSE;

          if (!low_regs || inst.operands[1].imm > 7)
            narrow = FALSE;

          if (Rn != Rm)
            narrow = FALSE;

          switch (inst.operands[1].shift_kind)
            {
            case SHIFT_LSL:
              opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
              break;
            case SHIFT_ASR:
              opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
              break;
            case SHIFT_LSR:
              opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
              break;
            case SHIFT_ROR:
              opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
              break;
            default:
              abort ();
            }

          inst.instruction = opcode;
          if (narrow)
            {
              inst.instruction |= Rn;
              inst.instruction |= inst.operands[1].imm << 3;
            }
          else
            {
              if (flags)
                inst.instruction |= CONDS_BIT;

              inst.instruction |= Rn << 8;
              inst.instruction |= Rm << 16;
              inst.instruction |= inst.operands[1].imm;
            }
        }
      else if (!narrow)
        {
          /* Some mov with immediate shift have narrow variants.
             Register shifts are handled above.  */
          if (low_regs && inst.operands[1].shifted
              && (inst.instruction == T_MNEM_mov
                  || inst.instruction == T_MNEM_movs))
            {
              if (in_it_block ())
                narrow = (inst.instruction == T_MNEM_mov);
              else
                narrow = (inst.instruction == T_MNEM_movs);
            }

          if (narrow)
            {
              switch (inst.operands[1].shift_kind)
                {
                case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
                case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
                case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
                default: narrow = FALSE; break;
                }
            }

          if (narrow)
            {
              inst.instruction |= Rn;
              inst.instruction |= Rm << 3;
              inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
            }
          else
            {
              inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= Rn << r0off;
              encode_thumb32_shifted_operand (1);
            }
        }
      else
        switch (inst.instruction)
          {
          case T_MNEM_mov:
            inst.instruction = T_OPCODE_MOV_HR;
            inst.instruction |= (Rn & 0x8) << 4;
            inst.instruction |= (Rn & 0x7);
            inst.instruction |= Rm << 3;
            break;

          case T_MNEM_movs:
            /* We know we have low registers at this point.
               Generate ADD Rd, Rs, #0.  */
            inst.instruction = T_OPCODE_ADD_I3;
            inst.instruction |= Rn;
            inst.instruction |= Rm << 3;
            break;

          case T_MNEM_cmp:
            if (low_regs)
              {
                inst.instruction = T_OPCODE_CMP_LR;
                inst.instruction |= Rn;
                inst.instruction |= Rm << 3;
              }
            else
              {
                inst.instruction = T_OPCODE_CMP_HR;
                inst.instruction |= (Rn & 0x8) << 4;
                inst.instruction |= (Rn & 0x7);
                inst.instruction |= Rm << 3;
              }
            break;
          }
      return;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
              _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
        {
          /* A move of two lowregs is encoded as ADD Rd, Rs, #0
             since a MOV instruction produces unpredictable results.  */
          if (inst.instruction == T_OPCODE_MOV_I8)
            inst.instruction = T_OPCODE_ADD_I3;
          else
            inst.instruction = T_OPCODE_CMP_LR;

          inst.instruction |= Rn;
          inst.instruction |= Rm << 3;
        }
      else
        {
          if (inst.instruction == T_OPCODE_MOV_I8)
            inst.instruction = T_OPCODE_MOV_HR;
          else
            inst.instruction = T_OPCODE_CMP_HR;
          do_t_cpy ();
        }
    }
  else
    {
      constraint (Rn > 7,
                  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
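
/* MOVW/MOVT: the 16-bit immediate is split over the imm4, i, imm3 and
   imm8 fields of the Thumb-2 encoding, which is what the shift-and-mask
   sequence at the end of do_t_mov16 reassembles when no relocation is
   left pending.  */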
static void
do_t_mov16 (void)
{
  unsigned Rd;
  bfd_vma imm;
  bfd_boolean top;

  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
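
/* do_t_mvn_tst serves several one-source data-processing mnemonics.
   Only unshifted low-register operands can use the 16-bit form; an
   immediate always starts out as a 32-bit opcode and relies on later
   relaxation to shrink it where possible.  */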
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
                   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
          || inst.instruction > 0xffff
          || inst.operands[1].shifted
          || Rn > 7 || Rm > 7)
        narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn)
        narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = !in_it_block ();
      else
        narrow = in_it_block ();

      if (!inst.operands[1].isreg)
        {
          /* For an immediate, we always generate a 32-bit opcode;
             section relaxation will shrink it later if possible.  */
          if (inst.instruction < 0xffff)
            inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
          inst.instruction |= Rn << r0off;
          inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
        }
      else
        {
          /* See if we can do this with a 16-bit instruction.  */
          if (narrow)
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= Rn;
              inst.instruction |= Rm << 3;
            }
          else
            {
              constraint (inst.operands[1].shifted
                          && inst.operands[1].immisreg,
                          _("shift must be constant"));
              if (inst.instruction < 0xffff)
                inst.instruction = THUMB_OP32 (inst.instruction);
              inst.instruction |= Rn << r0off;
              encode_thumb32_shifted_operand (1);
            }
        }
    }
  else
    {
      constraint (inst.instruction > 0xffff
                  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
                  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
                  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
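
/* MRS/MSR: the flags value mixes the PSR_* field selectors with
   SPSR_BIT, and the encoders below first check that the requested
   special register exists on the selected core (M-profile versus
   classic ARM) before ORing the pieces into the opcode.  */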
static void
do_t_mrs (void)
{
  unsigned Rd;
  int flags;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
  if (flags == 0)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register"));
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
                  _("'CPSR' or 'SPSR' expected"));
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= inst.operands[1].imm & 0xff;
}

static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
              _("Thumb encoding does not support an immediate here"));
  flags = inst.operands[0].imm;
  if (flags & ~0xff)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
                  _("selected processor does not support "
                    "requested special purpose register"));
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m),
                  _("selected processor does not support "
                    "requested special purpose register"));
      flags |= PSR_f;
    }

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & ~SPSR_BIT) >> 8;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
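
/* MUL can only be narrowed to 16 bits when the destination overlaps one
   of the sources and all registers are low; MULS and IT-predicated MUL
   have opposite IT-block requirements, hence the in_it_block tests.  */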
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      if (inst.size_req == 4
          || (Rd != Rn
              && Rd != Rm)
          || Rn > 7
          || Rm > 7)
        narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
        narrow = !in_it_block ();
      else
        narrow = in_it_block ();
    }
  else
    {
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
                  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rn)
        inst.instruction |= Rm << 3;
      else if (Rd == Rm)
        inst.instruction |= Rn << 3;
      else
        constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
                  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}

static void
do_t_mull (void)
{
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;

  if (RdLo == RdHi)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
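
/* NOP and the hint encodings: only Thumb-2 provides the hint forms, so
   on earlier cores the encoder falls back to the canonical Thumb NOP
   0x46c0 (mov r8, r8).  */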
static void
do_t_nop (void)
{
  set_it_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].imm;
        }
      else
        {
          /* PR9722: Check for Thumb2 availability before
             generating a thumb2 nop instruction.  */
          if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
            {
              inst.instruction = THUMB_OP16 (inst.instruction);
              inst.instruction |= inst.operands[0].imm << 4;
            }
          else
            inst.instruction = 0x46c0;
        }
    }
  else
    {
      constraint (inst.operands[0].present,
                  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
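
/* NEG/NEGS: the 16-bit form is used when both registers are low and the
   flag-setting behaviour matches the surrounding IT state; otherwise
   the 32-bit Thumb-2 encoding is chosen.  */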
static void
do_t_neg (void)
{
  if (unified_syntax)
    {
      bfd_boolean narrow;

      if (THUMB_SETS_FLAGS (inst.instruction))
        narrow = !in_it_block ();
      else
        narrow = in_it_block ();
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
        narrow = FALSE;
      if (inst.size_req == 4)
        narrow = FALSE;

      if (!narrow)
        {
          inst.instruction = THUMB_OP32 (inst.instruction);
          inst.instruction |= inst.operands[0].reg << 8;
          inst.instruction |= inst.operands[1].reg << 16;
        }
      else
        {
          inst.instruction = THUMB_OP16 (inst.instruction);
          inst.instruction |= inst.operands[0].reg;
          inst.instruction |= inst.operands[1].reg << 3;
        }
    }
  else
    {
      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
                  BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
    }
}
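
/* ORN exists only as a 32-bit Thumb-2 instruction; the encoder just
   validates the registers and defers the immediate or shifted-register
   operand to the common Thumb-2 operand encoders.  */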
static void
do_t_orn (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;

  reject_bad_reg (Rd);
  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;

  if (!inst.operands[2].isreg)
    {
      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
      inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
    }
  else
    {
      unsigned Rm;

      Rm = inst.operands[2].reg;
      reject_bad_reg (Rm);

      constraint (inst.operands[2].shifted
                  && inst.operands[2].immisreg,
                  _("shift must be constant"));
      encode_thumb32_shifted_operand (2);
    }
}
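
/* PKHBT/PKHTB: a PKHTB without a shift is really PKHBT with Rn and Rm
   swapped (the PR 10168 fix), so do_t_pkhtb only massages the operands
   and then reuses do_t_pkhbt.  */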
static void
do_t_pkhbt (void)
{
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  if (inst.operands[3].present)
    {
      unsigned int val = inst.reloc.exp.X_add_number;
      constraint (inst.reloc.exp.X_op != O_constant,
                  _("expression too complex"));
      inst.instruction |= (val & 0x1c) << 10;
      inst.instruction |= (val & 0x03) << 6;
    }
}

static void
do_t_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      unsigned Rtmp;

      inst.instruction &= ~0x00000020;

      /* PR 10168.  Swap the Rm and Rn registers.  */
      Rtmp = inst.operands[1].reg;
      inst.operands[1].reg = inst.operands[2].reg;
      inst.operands[2].reg = Rtmp;
    }
  do_t_pkhbt ();
}

static void
do_t_pld (void)
{
  if (inst.operands[0].immisreg)
    reject_bad_reg (inst.operands[0].imm);

  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
}
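
/* PUSH/POP: a pure low-register list fits the 16-bit encoding, a list
   that additionally contains only LR (push) or PC (pop) uses the 16-bit
   form with the PC/LR bit, and anything else needs the 32-bit LDM/STM
   encoding, which is accepted only under unified syntax.  */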
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
              _("push/pop do not support {reglist}^"));
  constraint (inst.reloc.type != BFD_RELOC_UNUSED,
              _("expression too complex"));

  mask = inst.operands[0].imm;
  if ((mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  else if ((inst.instruction == T_MNEM_push
            && (mask & ~0xff) == 1 << REG_LR)
           || (inst.instruction == T_MNEM_pop
               && (mask & ~0xff) == 1 << REG_PC))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_ldmstm (13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
10774 Rd
= inst
.operands
[0].reg
;
10775 Rm
= inst
.operands
[1].reg
;
10777 reject_bad_reg (Rd
);
10778 reject_bad_reg (Rm
);
10780 inst
.instruction
|= Rd
<< 8;
10781 inst
.instruction
|= Rm
<< 16;
10782 inst
.instruction
|= Rm
;
10790 Rd
= inst
.operands
[0].reg
;
10791 Rm
= inst
.operands
[1].reg
;
10793 reject_bad_reg (Rd
);
10794 reject_bad_reg (Rm
);
10796 if (Rd
<= 7 && Rm
<= 7
10797 && inst
.size_req
!= 4)
10799 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10800 inst
.instruction
|= Rd
;
10801 inst
.instruction
|= Rm
<< 3;
10803 else if (unified_syntax
)
10805 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10806 inst
.instruction
|= Rd
<< 8;
10807 inst
.instruction
|= Rm
<< 16;
10808 inst
.instruction
|= Rm
;
10811 inst
.error
= BAD_HIREG
;
10819 Rd
= inst
.operands
[0].reg
;
10820 Rm
= inst
.operands
[1].reg
;
10822 reject_bad_reg (Rd
);
10823 reject_bad_reg (Rm
);
10825 inst
.instruction
|= Rd
<< 8;
10826 inst
.instruction
|= Rm
;
10834 Rd
= inst
.operands
[0].reg
;
10835 Rs
= (inst
.operands
[1].present
10836 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10837 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10839 reject_bad_reg (Rd
);
10840 reject_bad_reg (Rs
);
10841 if (inst
.operands
[2].isreg
)
10842 reject_bad_reg (inst
.operands
[2].reg
);
10844 inst
.instruction
|= Rd
<< 8;
10845 inst
.instruction
|= Rs
<< 16;
10846 if (!inst
.operands
[2].isreg
)
10848 bfd_boolean narrow
;
10850 if ((inst
.instruction
& 0x00100000) != 0)
10851 narrow
= !in_it_block ();
10853 narrow
= in_it_block ();
10855 if (Rd
> 7 || Rs
> 7)
10858 if (inst
.size_req
== 4 || !unified_syntax
)
10861 if (inst
.reloc
.exp
.X_op
!= O_constant
10862 || inst
.reloc
.exp
.X_add_number
!= 0)
10865 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10866 relaxation, but it doesn't seem worth the hassle. */
10869 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10870 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
10871 inst
.instruction
|= Rs
<< 3;
10872 inst
.instruction
|= Rd
;
10876 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10877 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10881 encode_thumb32_shifted_operand (2);
10887 set_it_insn_type (OUTSIDE_IT_INSN
);
10888 if (inst
.operands
[0].imm
)
10889 inst
.instruction
|= 0x8;
10895 if (!inst
.operands
[1].present
)
10896 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
10898 if (unified_syntax
)
10900 bfd_boolean narrow
;
10903 switch (inst
.instruction
)
10906 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
10908 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
10910 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
10912 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
10916 if (THUMB_SETS_FLAGS (inst
.instruction
))
10917 narrow
= !in_it_block ();
10919 narrow
= in_it_block ();
10920 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10922 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
10924 if (inst
.operands
[2].isreg
10925 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
10926 || inst
.operands
[2].reg
> 7))
10928 if (inst
.size_req
== 4)
10931 reject_bad_reg (inst
.operands
[0].reg
);
10932 reject_bad_reg (inst
.operands
[1].reg
);
10936 if (inst
.operands
[2].isreg
)
10938 reject_bad_reg (inst
.operands
[2].reg
);
10939 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10940 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10941 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10942 inst
.instruction
|= inst
.operands
[2].reg
;
10946 inst
.operands
[1].shifted
= 1;
10947 inst
.operands
[1].shift_kind
= shift_kind
;
10948 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
10949 ? T_MNEM_movs
: T_MNEM_mov
);
10950 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10951 encode_thumb32_shifted_operand (1);
10952 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
10953 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10958 if (inst
.operands
[2].isreg
)
10960 switch (shift_kind
)
10962 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
10963 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
10964 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
10965 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
10969 inst
.instruction
|= inst
.operands
[0].reg
;
10970 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
10974 switch (shift_kind
)
10976 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10977 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10978 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10981 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10982 inst
.instruction
|= inst
.operands
[0].reg
;
10983 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10989 constraint (inst
.operands
[0].reg
> 7
10990 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
10991 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10993 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
10995 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
10996 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
10997 _("source1 and dest must be same register"));
10999 switch (inst
.instruction
)
11001 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11002 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11003 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11004 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11008 inst
.instruction
|= inst
.operands
[0].reg
;
11009 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11013 switch (inst
.instruction
)
11015 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11016 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11017 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11018 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
11021 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11022 inst
.instruction
|= inst
.operands
[0].reg
;
11023 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11031 unsigned Rd
, Rn
, Rm
;
11033 Rd
= inst
.operands
[0].reg
;
11034 Rn
= inst
.operands
[1].reg
;
11035 Rm
= inst
.operands
[2].reg
;
11037 reject_bad_reg (Rd
);
11038 reject_bad_reg (Rn
);
11039 reject_bad_reg (Rm
);
11041 inst
.instruction
|= Rd
<< 8;
11042 inst
.instruction
|= Rn
<< 16;
11043 inst
.instruction
|= Rm
;
11049 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
11050 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11051 _("expression too complex"));
11052 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11053 inst
.instruction
|= (value
& 0xf000) >> 12;
11054 inst
.instruction
|= (value
& 0x0ff0);
11055 inst
.instruction
|= (value
& 0x000f) << 16;
11059 do_t_ssat_usat (int bias
)
11063 Rd
= inst
.operands
[0].reg
;
11064 Rn
= inst
.operands
[2].reg
;
11066 reject_bad_reg (Rd
);
11067 reject_bad_reg (Rn
);
11069 inst
.instruction
|= Rd
<< 8;
11070 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
11071 inst
.instruction
|= Rn
<< 16;
11073 if (inst
.operands
[3].present
)
11075 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
11077 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11079 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11080 _("expression too complex"));
11082 if (shift_amount
!= 0)
11084 constraint (shift_amount
> 31,
11085 _("shift expression is too large"));
11087 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
11088 inst
.instruction
|= 0x00200000; /* sh bit. */
11090 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
11091 inst
.instruction
|= (shift_amount
& 0x03) << 6;
11099 do_t_ssat_usat (1);
11107 Rd
= inst
.operands
[0].reg
;
11108 Rn
= inst
.operands
[2].reg
;
11110 reject_bad_reg (Rd
);
11111 reject_bad_reg (Rn
);
11113 inst
.instruction
|= Rd
<< 8;
11114 inst
.instruction
|= inst
.operands
[1].imm
- 1;
11115 inst
.instruction
|= Rn
<< 16;
11121 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
11122 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
11123 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
11124 || inst
.operands
[2].negative
,
11127 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11128 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11129 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11130 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11136 if (!inst
.operands
[2].present
)
11137 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
11139 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
11140 || inst
.operands
[0].reg
== inst
.operands
[2].reg
11141 || inst
.operands
[0].reg
== inst
.operands
[3].reg
11142 || inst
.operands
[1].reg
== inst
.operands
[2].reg
,
11145 inst
.instruction
|= inst
.operands
[0].reg
;
11146 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11147 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
11148 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
11154 unsigned Rd
, Rn
, Rm
;
11156 Rd
= inst
.operands
[0].reg
;
11157 Rn
= inst
.operands
[1].reg
;
11158 Rm
= inst
.operands
[2].reg
;
11160 reject_bad_reg (Rd
);
11161 reject_bad_reg (Rn
);
11162 reject_bad_reg (Rm
);
11164 inst
.instruction
|= Rd
<< 8;
11165 inst
.instruction
|= Rn
<< 16;
11166 inst
.instruction
|= Rm
;
11167 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
11175 Rd
= inst
.operands
[0].reg
;
11176 Rm
= inst
.operands
[1].reg
;
11178 reject_bad_reg (Rd
);
11179 reject_bad_reg (Rm
);
11181 if (inst
.instruction
<= 0xffff
11182 && inst
.size_req
!= 4
11183 && Rd
<= 7 && Rm
<= 7
11184 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
11186 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11187 inst
.instruction
|= Rd
;
11188 inst
.instruction
|= Rm
<< 3;
11190 else if (unified_syntax
)
11192 if (inst
.instruction
<= 0xffff)
11193 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11194 inst
.instruction
|= Rd
<< 8;
11195 inst
.instruction
|= Rm
;
11196 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
11200 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
11201 _("Thumb encoding does not support rotation"));
11202 constraint (1, BAD_HIREG
);
11209 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
11218 half
= (inst
.instruction
& 0x10) != 0;
11219 set_it_insn_type_last ();
11220 constraint (inst
.operands
[0].immisreg
,
11221 _("instruction requires register index"));
11223 Rn
= inst
.operands
[0].reg
;
11224 Rm
= inst
.operands
[0].imm
;
11226 constraint (Rn
== REG_SP
, BAD_SP
);
11227 reject_bad_reg (Rm
);
11229 constraint (!half
&& inst
.operands
[0].shifted
,
11230 _("instruction does not allow shifted index"));
11231 inst
.instruction
|= (Rn
<< 16) | Rm
;
11237 do_t_ssat_usat (0);
11245 Rd
= inst
.operands
[0].reg
;
11246 Rn
= inst
.operands
[2].reg
;
11248 reject_bad_reg (Rd
);
11249 reject_bad_reg (Rn
);
11251 inst
.instruction
|= Rd
<< 8;
11252 inst
.instruction
|= inst
.operands
[1].imm
;
11253 inst
.instruction
|= Rn
<< 16;
11256 /* Neon instruction encoder helpers. */
11258 /* Encodings for the different types for various Neon opcodes. */
11260 /* An "invalid" code for the following tables. */
11263 struct neon_tab_entry
11266 unsigned float_or_poly
;
11267 unsigned scalar_or_imm
;
11270 /* Map overloaded Neon opcodes to their respective encodings. */
11271 #define NEON_ENC_TAB \
11272 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11273 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11274 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11275 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11276 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11277 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11278 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11279 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11280 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11281 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11282 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11283 /* Register variants of the following two instructions are encoded as
11284 vcge / vcgt with the operands reversed. */ \
11285 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11286 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11287 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11288 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11289 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11290 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11291 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11292 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11293 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11294 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11295 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11296 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11297 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11298 X(vshl, 0x0000400, N_INV, 0x0800510), \
11299 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11300 X(vand, 0x0000110, N_INV, 0x0800030), \
11301 X(vbic, 0x0100110, N_INV, 0x0800030), \
11302 X(veor, 0x1000110, N_INV, N_INV), \
11303 X(vorn, 0x0300110, N_INV, 0x0800010), \
11304 X(vorr, 0x0200110, N_INV, 0x0800010), \
11305 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11306 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11307 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11308 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11309 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11310 X(vst1, 0x0000000, 0x0800000, N_INV), \
11311 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11312 X(vst2, 0x0000100, 0x0800100, N_INV), \
11313 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11314 X(vst3, 0x0000200, 0x0800200, N_INV), \
11315 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11316 X(vst4, 0x0000300, 0x0800300, N_INV), \
11317 X(vmovn, 0x1b20200, N_INV, N_INV), \
11318 X(vtrn, 0x1b20080, N_INV, N_INV), \
11319 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11320 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11321 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11322 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
11323 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
11324 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11325 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11326 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11327 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
11331 #define X(OPC,I,F,S) N_MNEM_##OPC
11336 static const struct neon_tab_entry neon_enc_tab
[] =
11338 #define X(OPC,I,F,S) { (I), (F), (S) }
11343 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11344 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11345 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11346 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11347 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11348 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11349 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11350 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11351 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11352 #define NEON_ENC_SINGLE(X) \
11353 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
11354 #define NEON_ENC_DOUBLE(X) \
11355 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
11357 /* Define shapes for instruction operands. The following mnemonic characters
11358 are used in this table:
11360 F - VFP S<n> register
11361 D - Neon D<n> register
11362 Q - Neon Q<n> register
11366 L - D<n> register list
11368 This table is used to generate various data:
11369 - enumerations of the form NS_DDR to be used as arguments to
11371 - a table classifying shapes into single, double, quad, mixed.
11372 - a table used to drive neon_select_shape. */
11374 #define NEON_SHAPE_DEF \
11375 X(3, (D, D, D), DOUBLE), \
11376 X(3, (Q, Q, Q), QUAD), \
11377 X(3, (D, D, I), DOUBLE), \
11378 X(3, (Q, Q, I), QUAD), \
11379 X(3, (D, D, S), DOUBLE), \
11380 X(3, (Q, Q, S), QUAD), \
11381 X(2, (D, D), DOUBLE), \
11382 X(2, (Q, Q), QUAD), \
11383 X(2, (D, S), DOUBLE), \
11384 X(2, (Q, S), QUAD), \
11385 X(2, (D, R), DOUBLE), \
11386 X(2, (Q, R), QUAD), \
11387 X(2, (D, I), DOUBLE), \
11388 X(2, (Q, I), QUAD), \
11389 X(3, (D, L, D), DOUBLE), \
11390 X(2, (D, Q), MIXED), \
11391 X(2, (Q, D), MIXED), \
11392 X(3, (D, Q, I), MIXED), \
11393 X(3, (Q, D, I), MIXED), \
11394 X(3, (Q, D, D), MIXED), \
11395 X(3, (D, Q, Q), MIXED), \
11396 X(3, (Q, Q, D), MIXED), \
11397 X(3, (Q, D, S), MIXED), \
11398 X(3, (D, Q, S), MIXED), \
11399 X(4, (D, D, D, I), DOUBLE), \
11400 X(4, (Q, Q, Q, I), QUAD), \
11401 X(2, (F, F), SINGLE), \
11402 X(3, (F, F, F), SINGLE), \
11403 X(2, (F, I), SINGLE), \
11404 X(2, (F, D), MIXED), \
11405 X(2, (D, F), MIXED), \
11406 X(3, (F, F, I), MIXED), \
11407 X(4, (R, R, F, F), SINGLE), \
11408 X(4, (F, F, R, R), SINGLE), \
11409 X(3, (D, R, R), DOUBLE), \
11410 X(3, (R, R, D), DOUBLE), \
11411 X(2, (S, R), SINGLE), \
11412 X(2, (R, S), SINGLE), \
11413 X(2, (F, R), SINGLE), \
11414 X(2, (R, F), SINGLE)
11416 #define S2(A,B) NS_##A##B
11417 #define S3(A,B,C) NS_##A##B##C
11418 #define S4(A,B,C,D) NS_##A##B##C##D
11420 #define X(N, L, C) S##N L
11433 enum neon_shape_class
11441 #define X(N, L, C) SC_##C
11443 static enum neon_shape_class neon_shape_class
[] =
11461 /* Register widths of above. */
11462 static unsigned neon_shape_el_size
[] =
11473 struct neon_shape_info
11476 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
11479 #define S2(A,B) { SE_##A, SE_##B }
11480 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
11481 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
11483 #define X(N, L, C) { N, S##N L }
11485 static struct neon_shape_info neon_shape_tab
[] =
11495 /* Bit masks used in type checking given instructions.
11496 'N_EQK' means the type must be the same as (or based on in some way) the key
11497 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
11498 set, various other bits can be set as well in order to modify the meaning of
11499 the type constraint. */
11501 enum neon_type_mask
11524 N_KEY
= 0x1000000, /* Key element (main type specifier). */
11525 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
11526 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
11527 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
11528 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
11529 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
11530 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
11531 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
11532 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
11533 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
11535 N_MAX_NONSPECIAL
= N_F64
11538 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
11540 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
11541 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
11542 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
11543 #define N_SUF_32 (N_SU_32 | N_F32)
11544 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
11545 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
11547 /* Pass this as the first type argument to neon_check_type to ignore types
11549 #define N_IGNORE_TYPE (N_KEY | N_EQK)
11551 /* Select a "shape" for the current instruction (describing register types or
11552 sizes) from a list of alternatives. Return NS_NULL if the current instruction
11553 doesn't fit. For non-polymorphic shapes, checking is usually done as a
11554 function of operand parsing, so this function doesn't need to be called.
11555 Shapes should be listed in order of decreasing length. */
11557 static enum neon_shape
11558 neon_select_shape (enum neon_shape shape
, ...)
11561 enum neon_shape first_shape
= shape
;
11563 /* Fix missing optional operands. FIXME: we don't know at this point how
11564 many arguments we should have, so this makes the assumption that we have
11565 > 1. This is true of all current Neon opcodes, I think, but may not be
11566 true in the future. */
11567 if (!inst
.operands
[1].present
)
11568 inst
.operands
[1] = inst
.operands
[0];
11570 va_start (ap
, shape
);
11572 for (; shape
!= NS_NULL
; shape
= va_arg (ap
, int))
11577 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
11579 if (!inst
.operands
[j
].present
)
11585 switch (neon_shape_tab
[shape
].el
[j
])
11588 if (!(inst
.operands
[j
].isreg
11589 && inst
.operands
[j
].isvec
11590 && inst
.operands
[j
].issingle
11591 && !inst
.operands
[j
].isquad
))
11596 if (!(inst
.operands
[j
].isreg
11597 && inst
.operands
[j
].isvec
11598 && !inst
.operands
[j
].isquad
11599 && !inst
.operands
[j
].issingle
))
11604 if (!(inst
.operands
[j
].isreg
11605 && !inst
.operands
[j
].isvec
))
11610 if (!(inst
.operands
[j
].isreg
11611 && inst
.operands
[j
].isvec
11612 && inst
.operands
[j
].isquad
11613 && !inst
.operands
[j
].issingle
))
11618 if (!(!inst
.operands
[j
].isreg
11619 && !inst
.operands
[j
].isscalar
))
11624 if (!(!inst
.operands
[j
].isreg
11625 && inst
.operands
[j
].isscalar
))
11639 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
11640 first_error (_("invalid instruction shape"));
11645 /* True if SHAPE is predominantly a quadword operation (most of the time, this
11646 means the Q bit should be set). */
11649 neon_quad (enum neon_shape shape
)
11651 return neon_shape_class
[shape
] == SC_QUAD
;
11655 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
11658 /* Allow modification to be made to types which are constrained to be
11659 based on the key element, based on bits set alongside N_EQK. */
11660 if ((typebits
& N_EQK
) != 0)
11662 if ((typebits
& N_HLF
) != 0)
11664 else if ((typebits
& N_DBL
) != 0)
11666 if ((typebits
& N_SGN
) != 0)
11667 *g_type
= NT_signed
;
11668 else if ((typebits
& N_UNS
) != 0)
11669 *g_type
= NT_unsigned
;
11670 else if ((typebits
& N_INT
) != 0)
11671 *g_type
= NT_integer
;
11672 else if ((typebits
& N_FLT
) != 0)
11673 *g_type
= NT_float
;
11674 else if ((typebits
& N_SIZ
) != 0)
11675 *g_type
= NT_untyped
;
11679 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
11680 operand type, i.e. the single type specified in a Neon instruction when it
11681 is the only one given. */
11683 static struct neon_type_el
11684 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
11686 struct neon_type_el dest
= *key
;
11688 gas_assert ((thisarg
& N_EQK
) != 0);
11690 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
11695 /* Convert Neon type and size into compact bitmask representation. */
11697 static enum neon_type_mask
11698 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
11705 case 8: return N_8
;
11706 case 16: return N_16
;
11707 case 32: return N_32
;
11708 case 64: return N_64
;
11716 case 8: return N_I8
;
11717 case 16: return N_I16
;
11718 case 32: return N_I32
;
11719 case 64: return N_I64
;
11727 case 16: return N_F16
;
11728 case 32: return N_F32
;
11729 case 64: return N_F64
;
11737 case 8: return N_P8
;
11738 case 16: return N_P16
;
11746 case 8: return N_S8
;
11747 case 16: return N_S16
;
11748 case 32: return N_S32
;
11749 case 64: return N_S64
;
11757 case 8: return N_U8
;
11758 case 16: return N_U16
;
11759 case 32: return N_U32
;
11760 case 64: return N_U64
;
11771 /* Convert compact Neon bitmask type representation to a type and size. Only
11772 handles the case where a single bit is set in the mask. */
11775 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
11776 enum neon_type_mask mask
)
11778 if ((mask
& N_EQK
) != 0)
11781 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
11783 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
11785 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
11787 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
11792 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
11794 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
11795 *type
= NT_unsigned
;
11796 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
11797 *type
= NT_integer
;
11798 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
11799 *type
= NT_untyped
;
11800 else if ((mask
& (N_P8
| N_P16
)) != 0)
11802 else if ((mask
& (N_F32
| N_F64
)) != 0)
11810 /* Modify a bitmask of allowed types. This is only needed for type
11814 modify_types_allowed (unsigned allowed
, unsigned mods
)
11817 enum neon_el_type type
;
11823 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
11825 if (el_type_of_type_chk (&type
, &size
, allowed
& i
) == SUCCESS
)
11827 neon_modify_type_size (mods
, &type
, &size
);
11828 destmask
|= type_chk_of_el_type (type
, size
);
11835 /* Check type and return type classification.
11836 The manual states (paraphrase): If one datatype is given, it indicates the
11838 - the second operand, if there is one
11839 - the operand, if there is no second operand
11840 - the result, if there are no operands.
11841 This isn't quite good enough though, so we use a concept of a "key" datatype
11842 which is set on a per-instruction basis, which is the one which matters when
11843 only one data type is written.
11844 Note: this function has side-effects (e.g. filling in missing operands). All
11845 Neon instructions should call it before performing bit encoding. */
11847 static struct neon_type_el
11848 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
11851 unsigned i
, pass
, key_el
= 0;
11852 unsigned types
[NEON_MAX_TYPE_ELS
];
11853 enum neon_el_type k_type
= NT_invtype
;
11854 unsigned k_size
= -1u;
11855 struct neon_type_el badtype
= {NT_invtype
, -1};
11856 unsigned key_allowed
= 0;
11858 /* Optional registers in Neon instructions are always (not) in operand 1.
11859 Fill in the missing operand here, if it was omitted. */
11860 if (els
> 1 && !inst
.operands
[1].present
)
11861 inst
.operands
[1] = inst
.operands
[0];
11863 /* Suck up all the varargs. */
11865 for (i
= 0; i
< els
; i
++)
11867 unsigned thisarg
= va_arg (ap
, unsigned);
11868 if (thisarg
== N_IGNORE_TYPE
)
11873 types
[i
] = thisarg
;
11874 if ((thisarg
& N_KEY
) != 0)
11879 if (inst
.vectype
.elems
> 0)
11880 for (i
= 0; i
< els
; i
++)
11881 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
11883 first_error (_("types specified in both the mnemonic and operands"));
11887 /* Duplicate inst.vectype elements here as necessary.
11888 FIXME: No idea if this is exactly the same as the ARM assembler,
11889 particularly when an insn takes one register and one non-register
11891 if (inst
.vectype
.elems
== 1 && els
> 1)
11894 inst
.vectype
.elems
= els
;
11895 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
11896 for (j
= 0; j
< els
; j
++)
11898 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
11901 else if (inst
.vectype
.elems
== 0 && els
> 0)
11904 /* No types were given after the mnemonic, so look for types specified
11905 after each operand. We allow some flexibility here; as long as the
11906 "key" operand has a type, we can infer the others. */
11907 for (j
= 0; j
< els
; j
++)
11908 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
11909 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
11911 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
11913 for (j
= 0; j
< els
; j
++)
11914 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
11915 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
11920 first_error (_("operand types can't be inferred"));
11924 else if (inst
.vectype
.elems
!= els
)
11926 first_error (_("type specifier has the wrong number of parts"));
11930 for (pass
= 0; pass
< 2; pass
++)
11932 for (i
= 0; i
< els
; i
++)
11934 unsigned thisarg
= types
[i
];
11935 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
11936 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
11937 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
11938 unsigned g_size
= inst
.vectype
.el
[i
].size
;
11940 /* Decay more-specific signed & unsigned types to sign-insensitive
11941 integer types if sign-specific variants are unavailable. */
11942 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
11943 && (types_allowed
& N_SU_ALL
) == 0)
11944 g_type
= NT_integer
;
11946 /* If only untyped args are allowed, decay any more specific types to
11947 them. Some instructions only care about signs for some element
11948 sizes, so handle that properly. */
11949 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
11950 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
11951 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
11952 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
11953 g_type
= NT_untyped
;
11957 if ((thisarg
& N_KEY
) != 0)
11961 key_allowed
= thisarg
& ~N_KEY
;
11966 if ((thisarg
& N_VFP
) != 0)
11968 enum neon_shape_el regshape
= neon_shape_tab
[ns
].el
[i
];
11969 unsigned regwidth
= neon_shape_el_size
[regshape
], match
;
11971 /* In VFP mode, operands must match register widths. If we
11972 have a key operand, use its width, else use the width of
11973 the current operand. */
11979 if (regwidth
!= match
)
11981 first_error (_("operand size must match register width"));
11986 if ((thisarg
& N_EQK
) == 0)
11988 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
11990 if ((given_type
& types_allowed
) == 0)
11992 first_error (_("bad type in Neon instruction"));
11998 enum neon_el_type mod_k_type
= k_type
;
11999 unsigned mod_k_size
= k_size
;
12000 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
12001 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
12003 first_error (_("inconsistent types in Neon instruction"));
12011 return inst
.vectype
.el
[key_el
];
12014 /* Neon-style VFP instruction forwarding. */
12016 /* Thumb VFP instructions have 0xE in the condition field. */
12019 do_vfp_cond_or_thumb (void)
12022 inst
.instruction
|= 0xe0000000;
12024 inst
.instruction
|= inst
.cond
<< 28;
12027 /* Look up and encode a simple mnemonic, for use as a helper function for the
12028 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12029 etc. It is assumed that operand parsing has already been done, and that the
12030 operands are in the form expected by the given opcode (this isn't necessarily
12031 the same as the form in which they were parsed, hence some massaging must
12032 take place before this function is called).
12033 Checks current arch version against that in the looked-up opcode. */
12036 do_vfp_nsyn_opcode (const char *opname
)
12038 const struct asm_opcode
*opcode
;
12040 opcode
= hash_find (arm_ops_hsh
, opname
);
12045 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
12046 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
12051 inst
.instruction
= opcode
->tvalue
;
12052 opcode
->tencode ();
12056 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
12057 opcode
->aencode ();
12062 do_vfp_nsyn_add_sub (enum neon_shape rs
)
12064 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
12069 do_vfp_nsyn_opcode ("fadds");
12071 do_vfp_nsyn_opcode ("fsubs");
12076 do_vfp_nsyn_opcode ("faddd");
12078 do_vfp_nsyn_opcode ("fsubd");
12082 /* Check operand types to see if this is a VFP instruction, and if so call
12086 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
12088 enum neon_shape rs
;
12089 struct neon_type_el et
;
12094 rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12095 et
= neon_check_type (2, rs
,
12096 N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12100 rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12101 et
= neon_check_type (3, rs
,
12102 N_EQK
| N_VFP
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12109 if (et
.type
!= NT_invtype
)
12121 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
12123 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
12128 do_vfp_nsyn_opcode ("fmacs");
12130 do_vfp_nsyn_opcode ("fmscs");
12135 do_vfp_nsyn_opcode ("fmacd");
12137 do_vfp_nsyn_opcode ("fmscd");
12142 do_vfp_nsyn_mul (enum neon_shape rs
)
12145 do_vfp_nsyn_opcode ("fmuls");
12147 do_vfp_nsyn_opcode ("fmuld");
12151 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
12153 int is_neg
= (inst
.instruction
& 0x80) != 0;
12154 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_VFP
| N_KEY
);
12159 do_vfp_nsyn_opcode ("fnegs");
12161 do_vfp_nsyn_opcode ("fabss");
12166 do_vfp_nsyn_opcode ("fnegd");
12168 do_vfp_nsyn_opcode ("fabsd");
12172 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12173 insns belong to Neon, and are handled elsewhere. */
12176 do_vfp_nsyn_ldm_stm (int is_dbmode
)
12178 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
12182 do_vfp_nsyn_opcode ("fldmdbs");
12184 do_vfp_nsyn_opcode ("fldmias");
12189 do_vfp_nsyn_opcode ("fstmdbs");
12191 do_vfp_nsyn_opcode ("fstmias");
12196 do_vfp_nsyn_sqrt (void)
12198 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12199 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12202 do_vfp_nsyn_opcode ("fsqrts");
12204 do_vfp_nsyn_opcode ("fsqrtd");
12208 do_vfp_nsyn_div (void)
12210 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12211 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12212 N_F32
| N_F64
| N_KEY
| N_VFP
);
12215 do_vfp_nsyn_opcode ("fdivs");
12217 do_vfp_nsyn_opcode ("fdivd");
12221 do_vfp_nsyn_nmul (void)
12223 enum neon_shape rs
= neon_select_shape (NS_FFF
, NS_DDD
, NS_NULL
);
12224 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
12225 N_F32
| N_F64
| N_KEY
| N_VFP
);
12229 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
12230 do_vfp_sp_dyadic ();
12234 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
12235 do_vfp_dp_rd_rn_rm ();
12237 do_vfp_cond_or_thumb ();
12241 do_vfp_nsyn_cmp (void)
12243 if (inst
.operands
[1].isreg
)
12245 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_DD
, NS_NULL
);
12246 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F32
| N_F64
| N_KEY
| N_VFP
);
12250 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
12251 do_vfp_sp_monadic ();
12255 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
12256 do_vfp_dp_rd_rm ();
12261 enum neon_shape rs
= neon_select_shape (NS_FI
, NS_DI
, NS_NULL
);
12262 neon_check_type (2, rs
, N_F32
| N_F64
| N_KEY
| N_VFP
, N_EQK
);
12264 switch (inst
.instruction
& 0x0fffffff)
12267 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
12270 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
12278 inst
.instruction
= NEON_ENC_SINGLE (inst
.instruction
);
12279 do_vfp_sp_compare_z ();
12283 inst
.instruction
= NEON_ENC_DOUBLE (inst
.instruction
);
12287 do_vfp_cond_or_thumb ();
12291 nsyn_insert_sp (void)
12293 inst
.operands
[1] = inst
.operands
[0];
12294 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
12295 inst
.operands
[0].reg
= REG_SP
;
12296 inst
.operands
[0].isreg
= 1;
12297 inst
.operands
[0].writeback
= 1;
12298 inst
.operands
[0].present
= 1;
12302 do_vfp_nsyn_push (void)
12305 if (inst
.operands
[1].issingle
)
12306 do_vfp_nsyn_opcode ("fstmdbs");
12308 do_vfp_nsyn_opcode ("fstmdbd");
12312 do_vfp_nsyn_pop (void)
12315 if (inst
.operands
[1].issingle
)
12316 do_vfp_nsyn_opcode ("fldmias");
12318 do_vfp_nsyn_opcode ("fldmiad");
12321 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12322 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12325 neon_dp_fixup (unsigned i
)
12329 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
12343 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
12347 neon_logbits (unsigned x
)
12349 return ffs (x
) - 4;
12352 #define LOW4(R) ((R) & 0xf)
12353 #define HI1(R) (((R) >> 4) & 1)
12355 /* Encode insns with bit pattern:
12357 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12358 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12360 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12361 different meaning for some instruction. */
12364 neon_three_same (int isquad
, int ubit
, int size
)
12366 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12367 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12368 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
12369 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
12370 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
12371 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
12372 inst
.instruction
|= (isquad
!= 0) << 6;
12373 inst
.instruction
|= (ubit
!= 0) << 24;
12375 inst
.instruction
|= neon_logbits (size
) << 20;
12377 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
12380 /* Encode instructions of the form:
12382 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12383 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12385 Don't write size if SIZE == -1. */
12388 neon_two_same (int qbit
, int ubit
, int size
)
12390 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
12391 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
12392 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
12393 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
12394 inst
.instruction
|= (qbit
!= 0) << 6;
12395 inst
.instruction
|= (ubit
!= 0) << 24;
12398 inst
.instruction
|= neon_logbits (size
) << 18;
12400 inst
.instruction
= neon_dp_fixup (inst
.instruction
);
/* Neon instruction encoders, in approximate order of appearance.  */

static void
do_neon_dyadic_i_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}

static void
do_neon_dyadic_i64_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		      inst.operands[2].imm);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}

static void
do_neon_rshl (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  unsigned int tmp;

  tmp = inst.operands[2].reg;
  inst.operands[2].reg = inst.operands[1].reg;
  inst.operands[1].reg = tmp;
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}

/* For immediate of above form, return 0bABCD.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}

/* Compress quarter-float representation to 0b...000 abcdefgh.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
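
/* Worked examples (illustrative): neon_squash_bits (0x01000100) picks the low
   bit of each byte and packs them as 0bABCD, giving 0xa.  neon_qfloat_bits
   applied to the IEEE single-precision pattern for 1.0 (0x3f800000) keeps the
   sign bit and bits [25:19], giving the 8-bit quarter-float encoding 0x70.  */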
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN). If the immediate looks like a repeated pattern then also
   try smaller element sizes.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
/* Write immediate bits [7:0] to the following locations:

  |28/24|23     19|18 16|15                    4|3     0|
  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|

  This function is used by VMOV/VMVN/VORR/VBIC.  */

static void
neon_write_immbits (unsigned immbits)
{
  inst.instruction |= immbits & 0xf;
  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
  inst.instruction |= ((immbits >> 7) & 0x1) << 24;
}
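
/* Worked example (illustrative): for IMMBITS == 0xab (0b10101011), bits
   e f g h == 0b1011 go to bits [3:0], b c d == 0b010 to bits [18:16] and
   a == 1 to bit 24, matching the layout in the comment above.  */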
/* Invert low-order SIZE bits of XHI:XLO.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      inst.instruction = NEON_ENC_IMMED (inst.instruction);

      immbits = inst.operands[1].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[1].regisimm ?
			  inst.operands[1].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}
static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      inst.instruction = NEON_ENC_FLOAT (inst.instruction);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allows D registers, but that constraint is enforced
     during operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2
};

/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}

/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   result to be:
     V<op> A,B     (A is operand 0, B is operand 2)
   to mean:
     V<op> A,B,A
   not:
     V<op> A,B,B
   so handle that case specially.  */

static void
neon_exchange_operands (void)
{
  void *scratch = alloca (sizeof (inst.operands[0]));
  if (inst.operands[1].present)
    {
      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
    }
  else
    {
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
}

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
}

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
}

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
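
/* Worked example (illustrative): assuming NEON_SCALAR_REG and
   NEON_SCALAR_INDEX return the register number and lane index, a 16-bit
   scalar d5[1] encodes as 5 | (1 << 3) == 0xd, i.e. the register in Rm[2:0]
   and the lane in bit 3; a 32-bit scalar d5[1] encodes as 5 | (1 << 4).  */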
/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen. Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}

static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}

/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
}
static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, -1);
}

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, -1);
}
static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}

static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (0, 1, et.size / 2);
}

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */

static int
neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y)							\
  et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y));	\
  if (et.type != NT_invtype)						\
    {									\
      inst.error = NULL;						\
      return (C);							\
    }
  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_VAR (0, N_S32, N_F32);
  CVT_VAR (1, N_U32, N_F32);
  CVT_VAR (2, N_F32, N_S32);
  CVT_VAR (3, N_F32, N_U32);
  /* Half-precision conversions.  */
  CVT_VAR (4, N_F32, N_F16);
  CVT_VAR (5, N_F16, N_F32);

  whole_reg = N_VFP;

  /* VFP instructions.  */
  CVT_VAR (6, N_F32, N_F64);
  CVT_VAR (7, N_F64, N_F32);
  CVT_VAR (8, N_S32, N_F64 | key);
  CVT_VAR (9, N_U32, N_F64 | key);
  CVT_VAR (10, N_F64 | key, N_S32);
  CVT_VAR (11, N_F64 | key, N_U32);
  /* VFP instructions with bitshift.  */
  CVT_VAR (12, N_F32 | key, N_S16);
  CVT_VAR (13, N_F32 | key, N_U16);
  CVT_VAR (14, N_F64 | key, N_S16);
  CVT_VAR (15, N_F64 | key, N_U16);
  CVT_VAR (16, N_S16, N_F32 | key);
  CVT_VAR (17, N_U16, N_F32 | key);
  CVT_VAR (18, N_S16, N_F64 | key);
  CVT_VAR (19, N_U16, N_F64 | key);

  return -1;
#undef CVT_VAR
}
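
/* Illustrative note: a plain "vcvt.s32.f32 Dd, Dm" matches
   CVT_VAR (0, N_S32, N_F32) above and so returns flavour 0, whereas the
   double-precision forms only match the later VFP-only entries; the callers
   below use the flavour number to choose between Neon and VFP encodings.  */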
/* Neon-syntax VFP conversions.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
	  /* ... table of VFP conversion mnemonics, one entry per flavour,
	     elided ...  */
	};

      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
	  /* ... table of VFP conversion mnemonics, one entry per flavour,
	     elided ...  */
	};

      if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  do_vfp_nsyn_opcode (opname);
}
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
  int flavour = neon_cvt_flavour (rs);
  const char *enc[] =
    {
      /* ... table of round-towards-zero VFP conversion mnemonics, one entry
	 per flavour (NULL where no such form exists), elided ...  */
    };

  if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
static void
do_neon_cvt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
    NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
  int flavour = neon_cvt_flavour (rs);

  /* VFP rather than Neon conversions.  */
  if (flavour >= 6)
    {
      do_vfp_nsyn_cvt (rs, flavour);
      return;
    }

  switch (rs)
    {
    case NS_DDI:
    case NS_QQI:
      {
	unsigned immbits;
	unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	immbits = 32 - inst.operands[2].imm;
	inst.instruction = NEON_ENC_IMMED (inst.instruction);
	inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	inst.instruction |= immbits << 16;

	inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DD:
    case NS_QQ:
    int_encode:
      {
	unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };

	inst.instruction = NEON_ENC_INTEGER (inst.instruction);

	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;

	inst.instruction |= enctab[flavour];

	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 2 << 18;

	inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	{
	  as_bad (_("operand size must match register width"));
	  break;
	}

      if (rs == NS_DQ)
	inst.instruction = 0x3b60600;
      else
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction = neon_dp_fixup (inst.instruction);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      do_vfp_nsyn_cvt (rs, flavour);
    }
}
static void
do_neon_cvtb (void)
{
  inst.instruction = 0xeb20a40;

  /* The sizes are attached to the mnemonic.  */
  if (inst.vectype.el[0].type != NT_invtype
      && inst.vectype.el[0].size == 16)
    inst.instruction |= 0x00010000;

  /* Programmer's syntax: the sizes are attached to the operands.  */
  else if (inst.operands[0].vectype.type != NT_invtype
	   && inst.operands[0].vectype.size == 16)
    inst.instruction |= 0x00010000;

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
  do_vfp_cond_or_thumb ();
}

static void
do_neon_cvtt (void)
{
  do_neon_cvtb ();
  inst.instruction |= 0x80;
}
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
static void
do_neon_mvn (void)
{
  if (inst.operands[1].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      inst.instruction = NEON_ENC_IMMED (inst.instruction);
      neon_move_immediate ();
    }

  inst.instruction = neon_dp_fixup (inst.instruction);
}
/* Encode instructions of form:

  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */

static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_dyadic_long (void)
{
  /* FIXME: Type checking for lengthening op.  */
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      neon_mixed_length (et, et.size);
    }
}

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
      if (et.type == NT_poly)
	inst.instruction = NEON_ENC_POLY (inst.instruction);
      else
	inst.instruction = NEON_ENC_INTEGER (inst.instruction);
      /* For polynomial encoding, size field must be 0b00 and the U bit must be
	 zero. Should be OK as-is.  */
      neon_mixed_length (et, et.size);
    }
}
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_rev (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      inst.instruction = NEON_ENC_SCALAR (inst.instruction);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      inst.instruction = neon_dp_fixup (inst.instruction);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* Duplicate ARM register to lanes of vector.  */
      inst.instruction = NEON_ENC_ARMREG (inst.instruction);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
/* VMOV has particularly many variations. It can be one of:
     0. VMOV<c><q> <Qd>, <Qm>
     1. VMOV<c><q> <Dd>, <Dm>
   (Register operations, which are VORR with Rm = Rn.)
     2. VMOV<c><q>.<dt> <Qd>, #<imm>
     3. VMOV<c><q>.<dt> <Dd>, #<imm>
   (Immediate loads.)
     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
   (ARM register to scalar.)
     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
   (Two ARM registers to vector.)
     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
   (Scalar to ARM register.)
     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
   (Vector to two ARM registers.)
     8. VMOV.F32 <Sd>, <Sm>
     9. VMOV.F64 <Dd>, <Dm>
   (VFP register moves.)
    10. VMOV.F32 <Sd>, #imm
    11. VMOV.F64 <Dd>, #imm
   (VFP float immediate load.)
    12. VMOV <Rd>, <Sm>
   (VFP single to ARM reg.)
    13. VMOV <Sd>, <Rm>
   (ARM reg to VFP single.)
    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
   (Two ARM regs to two VFP singles.)
    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
   (Two VFP singles to two ARM regs.)

   These cases can be disambiguated using neon_select_shape, except cases 1/9
   and 3/11 which depend on the operand type too.

   All the encoded bits are hardcoded by this function.

   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
   Cases 5, 7 may be used with VFPv2 and above.

   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
   can specify a type where it doesn't make sense to, and is ignored).  */
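
/* Illustrative note: of the cases listed above, case 8 ("vmov.f32 s0, s1")
   resolves to shape NS_FF and is emitted through the VFP fcpys encoding,
   whereas case 0 ("vmov q0, q1") resolves to NS_QQ and is encoded as a Neon
   register-to-register move (a VORR with identical source registers), as
   handled in do_neon_mov below.  */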
static void
do_neon_mov (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
    NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
    NS_NULL);
  struct neon_type_el et;
  const char *ldconst = 0;

  switch (rs)
    {
    case NS_DD:  /* case 1/9.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      /* It is not an error here if no type is given.  */
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  do_vfp_nsyn_opcode ("fcpyd");
	  break;
	}
      /* fall through.  */

    case NS_QQ:  /* case 0/1.  */
      {
	if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	  return;
	/* The architecture manual I have doesn't explicitly state which
	   value the U bit should have for register->register moves, but
	   the equivalent VORR instruction has U = 0, so do that.  */
	inst.instruction = 0x0200110;
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
	inst.instruction |= neon_quad (rs) << 6;

	inst.instruction = neon_dp_fixup (inst.instruction);
      }
      break;

    case NS_DI:  /* case 3/11.  */
      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
      inst.error = NULL;
      if (et.type == NT_float && et.size == 64)
	{
	  /* case 11 (fconstd).  */
	  ldconst = "fconstd";
	  goto encode_fconstd;
	}
      /* fall through.  */

    case NS_QI:  /* case 2/3.  */
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
      inst.instruction = 0x0800010;
      neon_move_immediate ();
      inst.instruction = neon_dp_fixup (inst.instruction);
      break;

    case NS_SR:  /* case 4.  */
      {
	unsigned bcdebits = 0;
	struct neon_type_el et = neon_check_type (2, NS_NULL,
	  N_8 | N_16 | N_32 | N_KEY, N_EQK);
	int logsize = neon_logbits (et.size);
	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  bcdebits = 0x8; break;
	  case 16: bcdebits = 0x1; break;
	  case 32: bcdebits = 0x0; break;
	  default: abort ();
	  }

	bcdebits |= x << logsize;

	inst.instruction = 0xe000b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[1].reg << 12;
	inst.instruction |= (bcdebits & 3) << 5;
	inst.instruction |= (bcdebits >> 2) << 21;
      }
      break;

    case NS_DRR:  /* case 5 (fmdrr).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc400b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= LOW4 (inst.operands[0].reg);
      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
      inst.instruction |= inst.operands[1].reg << 12;
      inst.instruction |= inst.operands[2].reg << 16;
      break;

    case NS_RS:  /* case 6.  */
      {
	struct neon_type_el et = neon_check_type (2, NS_NULL,
	  N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
	unsigned logsize = neon_logbits (et.size);
	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
	unsigned abcdebits = 0;

	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
		    _(BAD_FPU));
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
		    && et.size != 32, _(BAD_FPU));
	constraint (et.type == NT_invtype, _("bad type for scalar"));
	constraint (x >= 64 / et.size, _("scalar index out of range"));

	switch (et.size)
	  {
	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
	  case 32: abcdebits = 0x00; break;
	  default: abort ();
	  }

	abcdebits |= x << logsize;
	inst.instruction = 0xe100b10;
	do_vfp_cond_or_thumb ();
	inst.instruction |= LOW4 (dn) << 16;
	inst.instruction |= HI1 (dn) << 7;
	inst.instruction |= inst.operands[0].reg << 12;
	inst.instruction |= (abcdebits & 3) << 5;
	inst.instruction |= (abcdebits >> 2) << 21;
      }
      break;

    case NS_RRD:  /* case 7 (fmrrd).  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
		  _(BAD_FPU));

      inst.instruction = 0xc500b10;
      do_vfp_cond_or_thumb ();
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      break;

    case NS_FF:  /* case 8 (fcpys).  */
      do_vfp_nsyn_opcode ("fcpys");
      break;

    case NS_FI:  /* case 10 (fconsts).  */
      ldconst = "fconsts";
      encode_fconstd:
      if (is_quarter_float (inst.operands[1].imm))
	{
	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
	  do_vfp_nsyn_opcode (ldconst);
	}
      else
	first_error (_("immediate out of range"));
      break;

    case NS_RF:  /* case 12 (fmrs).  */
      do_vfp_nsyn_opcode ("fmrs");
      break;

    case NS_FR:  /* case 13 (fmsr).  */
      do_vfp_nsyn_opcode ("fmsr");
      break;

    /* The encoders for the fmrrs and fmsrr instructions expect three operands
       (one of which is a list), but we have parsed four.  Do some fiddling to
       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
       expect.  */
    case NS_RRFF:  /* case 14 (fmrrs).  */
      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[2].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmrrs");
      break;

    case NS_FFRR:  /* case 15 (fmsrr).  */
      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
		  _("VFP registers must be adjacent"));
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[3];
      inst.operands[0].imm = 2;
      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
      do_vfp_nsyn_opcode ("fmsrr");
      break;

    default:
      abort ();
    }
}
static void
do_neon_rshift_round_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}

static void
do_neon_movl (void)
{
  struct neon_type_el et = neon_check_type (2, NS_QD,
    N_EQK | N_DBL, N_SU_32 | N_KEY);
  unsigned sizebits = et.size >> 3;
  inst.instruction |= sizebits << 19;
  neon_two_same (0, et.type == NT_unsigned, -1);
}
static void
do_neon_trn (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  inst.instruction = NEON_ENC_INTEGER (inst.instruction);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_zip_uzp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);
  if (rs == NS_DD && et.size == 32)
    {
      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
      inst.instruction = N_MNEM_vtrn;
      do_neon_trn ();
      return;
    }
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_sat_abs_neg (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_pair_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
  /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
  inst.instruction |= (et.type == NT_unsigned) << 7;
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_recip_est (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
  inst.instruction |= (et.type == NT_float) << 8;
  neon_two_same (neon_quad (rs), 1, et.size);
}
static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  inst.instruction = neon_dp_fixup (inst.instruction);
}
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
/* "interleave" version also handles non-interleaving register VLD1/VST1
   instructions.  */

static void
do_neon_ld_st_interleave (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
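
/* Worked example (illustrative): for a VLD2 (bits [9:8] of the bitmask == 1)
   with a two-register, stride-one list, the list bits give
   ((imm >> 4) & 7) == 0b010, so IDX == 2 | (1 << 3) == 10 and
   typetable[10] == 0x8 is what ends up in bits [11:8].  */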
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise. The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
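
/* Usage note (illustrative): a call such as
     neon_alignment_bit (et.size, align, &do_align, 16, 16, 32, 32, -1);
   accepts ":16" alignment for 16-bit elements and ":32" for 32-bit elements,
   reports an error and returns FAIL for any other explicit alignment, and
   simply clears *do_align when no alignment was specified.  */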
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
14581 /* Encode single n-element structure to all lanes VLD<n> instructions. */
14584 do_neon_ld_dup (void)
14586 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
14587 int align_good
, do_align
= 0;
14589 if (et
.type
== NT_invtype
)
14592 switch ((inst
.instruction
>> 8) & 3)
14594 case 0: /* VLD1. */
14595 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
14596 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
14597 &do_align
, 16, 16, 32, 32, -1);
14598 if (align_good
== FAIL
)
14600 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
14603 case 2: inst
.instruction
|= 1 << 5; break;
14604 default: first_error (_("bad list length")); return;
14606 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14609 case 1: /* VLD2. */
14610 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
14611 &do_align
, 8, 16, 16, 32, 32, 64, -1);
14612 if (align_good
== FAIL
)
14614 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
14615 _("bad list length"));
14616 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14617 inst
.instruction
|= 1 << 5;
14618 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14621 case 2: /* VLD3. */
14622 constraint (inst
.operands
[1].immisalign
,
14623 _("can't use alignment with this instruction"));
14624 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
14625 _("bad list length"));
14626 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14627 inst
.instruction
|= 1 << 5;
14628 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14631 case 3: /* VLD4. */
14633 int align
= inst
.operands
[1].imm
>> 8;
14634 align_good
= neon_alignment_bit (et
.size
, align
, &do_align
, 8, 32,
14635 16, 64, 32, 64, 32, 128, -1);
14636 if (align_good
== FAIL
)
14638 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
14639 _("bad list length"));
14640 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
14641 inst
.instruction
|= 1 << 5;
14642 if (et
.size
== 32 && align
== 128)
14643 inst
.instruction
|= 0x3 << 6;
14645 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14652 inst
.instruction
|= do_align
<< 4;
14655 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
14656 apart from bits [11:4]. */
14659 do_neon_ldx_stx (void)
14661 switch (NEON_LANE (inst
.operands
[0].imm
))
14663 case NEON_INTERLEAVE_LANES
:
14664 inst
.instruction
= NEON_ENC_INTERLV (inst
.instruction
);
14665 do_neon_ld_st_interleave ();
14668 case NEON_ALL_LANES
:
14669 inst
.instruction
= NEON_ENC_DUP (inst
.instruction
);
14674 inst
.instruction
= NEON_ENC_LANE (inst
.instruction
);
14675 do_neon_ld_st_lane ();
14678 /* L bit comes from bit mask. */
14679 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14680 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14681 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14683 if (inst
.operands
[1].postind
)
14685 int postreg
= inst
.operands
[1].imm
& 0xf;
14686 constraint (!inst
.operands
[1].immisreg
,
14687 _("post-index must be a register"));
14688 constraint (postreg
== 0xd || postreg
== 0xf,
14689 _("bad register for post-index"));
14690 inst
.instruction
|= postreg
;
14692 else if (inst
.operands
[1].writeback
)
14694 inst
.instruction
|= 0xd;
14697 inst
.instruction
|= 0xf;
14700 inst
.instruction
|= 0xf9000000;
14702 inst
.instruction
|= 0xf4000000;
14705 /* Overall per-instruction processing. */
14707 /* We need to be able to fix up arbitrary expressions in some statements.
14708 This is so that we can handle symbols that are an arbitrary distance from
14709 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
14710 which returns part of an address in a form which will be valid for
14711 a data instruction. We do this by pushing the expression into a symbol
14712 in the expr_section, and creating a fix for that. */
14715 fix_new_arm (fragS
* frag
,
14730 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
, reloc
);
14734 new_fix
= fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
14739 /* Mark whether the fix is to a THUMB instruction, or an ARM
14741 new_fix
->tc_fix_data
= thumb_mode
;
14744 /* Create a frg for an instruction requiring relaxation. */
14746 output_relax_insn (void)
14752 /* The size of the instruction is unknown, so tie the debug info to the
14753 start of the instruction. */
14754 dwarf2_emit_insn (0);
14756 switch (inst
.reloc
.exp
.X_op
)
14759 sym
= inst
.reloc
.exp
.X_add_symbol
;
14760 offset
= inst
.reloc
.exp
.X_add_number
;
14764 offset
= inst
.reloc
.exp
.X_add_number
;
14767 sym
= make_expr_symbol (&inst
.reloc
.exp
);
14771 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
14772 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
14773 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
14776 /* Write a 32-bit thumb instruction to buf. */
14778 put_thumb32_insn (char * buf
, unsigned long insn
)
14780 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
14781 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
14785 output_inst (const char * str
)
14791 as_bad ("%s -- `%s'", inst
.error
, str
);
14796 output_relax_insn ();
14799 if (inst
.size
== 0)
14802 to
= frag_more (inst
.size
);
14803 /* PR 9814: Record the thumb mode into the current frag so that we know
14804 what type of NOP padding to use, if necessary. We override any previous
14805 setting so that if the mode has changed then the NOPS that we use will
14806 match the encoding of the last instruction in the frag. */
14807 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
14809 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
14811 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
14812 put_thumb32_insn (to
, inst
.instruction
);
14814 else if (inst
.size
> INSN_SIZE
)
14816 gas_assert (inst
.size
== (2 * INSN_SIZE
));
14817 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
14818 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
14821 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
14823 if (inst
.reloc
.type
!= BFD_RELOC_UNUSED
)
14824 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
14825 inst
.size
, & inst
.reloc
.exp
, inst
.reloc
.pc_rel
,
14828 dwarf2_emit_insn (inst
.size
);
14832 output_it_inst (int cond
, int mask
, char * to
)
14834 unsigned long instruction
= 0xbf00;
14837 instruction
|= mask
;
14838 instruction
|= cond
<< 4;
14842 to
= frag_more (2);
14844 dwarf2_emit_insn (2);
14848 md_number_to_chars (to
, instruction
, 2);
14853 /* Tag values used in struct asm_opcode's tag field. */
14856 OT_unconditional
, /* Instruction cannot be conditionalized.
14857 The ARM condition field is still 0xE. */
14858 OT_unconditionalF
, /* Instruction cannot be conditionalized
14859 and carries 0xF in its ARM condition field. */
14860 OT_csuffix
, /* Instruction takes a conditional suffix. */
14861 OT_csuffixF
, /* Some forms of the instruction take a conditional
14862 suffix, others place 0xF where the condition field
14864 OT_cinfix3
, /* Instruction takes a conditional infix,
14865 beginning at character index 3. (In
14866 unified mode, it becomes a suffix.) */
14867 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
14868 tsts, cmps, cmns, and teqs. */
14869 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
14870 character index 3, even in unified mode. Used for
14871 legacy instructions where suffix and infix forms
14872 may be ambiguous. */
14873 OT_csuf_or_in3
, /* Instruction takes either a conditional
14874 suffix or an infix at character index 3. */
14875 OT_odd_infix_unc
, /* This is the unconditional variant of an
14876 instruction that takes a conditional infix
14877 at an unusual position. In unified mode,
14878 this variant will accept a suffix. */
14879 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
14880 are the conditional variants of instructions that
14881 take conditional infixes in unusual positions.
14882 The infix appears at character index
14883 (tag - OT_odd_infix_0). These are not accepted
14884 in unified mode. */
14887 /* Subroutine of md_assemble, responsible for looking up the primary
14888 opcode from the mnemonic the user wrote. STR points to the
14889 beginning of the mnemonic.
14891 This is not simply a hash table lookup, because of conditional
14892 variants. Most instructions have conditional variants, which are
14893 expressed with a _conditional affix_ to the mnemonic. If we were
14894 to encode each conditional variant as a literal string in the opcode
14895 table, it would have approximately 20,000 entries.
14897 Most mnemonics take this affix as a suffix, and in unified syntax,
14898 'most' is upgraded to 'all'. However, in the divided syntax, some
14899 instructions take the affix as an infix, notably the s-variants of
14900 the arithmetic instructions. Of those instructions, all but six
14901 have the infix appear after the third character of the mnemonic.
14903 Accordingly, the algorithm for looking up primary opcodes given
14906 1. Look up the identifier in the opcode table.
14907 If we find a match, go to step U.
14909 2. Look up the last two characters of the identifier in the
14910 conditions table. If we find a match, look up the first N-2
14911 characters of the identifier in the opcode table. If we
14912 find a match, go to step CE.
14914 3. Look up the fourth and fifth characters of the identifier in
14915 the conditions table. If we find a match, extract those
14916 characters from the identifier, and look up the remaining
14917 characters in the opcode table. If we find a match, go
14922 U. Examine the tag field of the opcode structure, in case this is
14923 one of the six instructions with its conditional infix in an
14924 unusual place. If it is, the tag tells us where to find the
14925 infix; look it up in the conditions table and set inst.cond
14926 accordingly. Otherwise, this is an unconditional instruction.
14927 Again set inst.cond accordingly. Return the opcode structure.
14929 CE. Examine the tag field to make sure this is an instruction that
14930 should receive a conditional suffix. If it is not, fail.
14931 Otherwise, set inst.cond from the suffix we already looked up,
14932 and return the opcode structure.
14934 CM. Examine the tag field to make sure this is an instruction that
14935 should receive a conditional infix after the third character.
14936 If it is not, fail. Otherwise, undo the edits to the current
14937 line of input and proceed as for case CE. */
14939 static const struct asm_opcode
*
14940 opcode_lookup (char **str
)
14944 const struct asm_opcode
*opcode
;
14945 const struct asm_cond
*cond
;
14947 bfd_boolean neon_supported
;
14949 neon_supported
= ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
);
14951 /* Scan up to the end of the mnemonic, which must end in white space,
14952 '.' (in unified mode, or for Neon instructions), or end of string. */
14953 for (base
= end
= *str
; *end
!= '\0'; end
++)
14954 if (*end
== ' ' || ((unified_syntax
|| neon_supported
) && *end
== '.'))
14960 /* Handle a possible width suffix and/or Neon type suffix. */
14965 /* The .w and .n suffixes are only valid if the unified syntax is in
14967 if (unified_syntax
&& end
[1] == 'w')
14969 else if (unified_syntax
&& end
[1] == 'n')
14974 inst
.vectype
.elems
= 0;
14976 *str
= end
+ offset
;
14978 if (end
[offset
] == '.')
14980 /* See if we have a Neon type suffix (possible in either unified or
14981 non-unified ARM syntax mode). */
14982 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
14985 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
14991 /* Look for unaffixed or special-case affixed mnemonic. */
14992 opcode
= hash_find_n (arm_ops_hsh
, base
, end
- base
);
14996 if (opcode
->tag
< OT_odd_infix_0
)
14998 inst
.cond
= COND_ALWAYS
;
15002 if (warn_on_deprecated
&& unified_syntax
)
15003 as_warn (_("conditional infixes are deprecated in unified syntax"));
15004 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
15005 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
15008 inst
.cond
= cond
->value
;
15012 /* Cannot have a conditional suffix on a mnemonic of less than two
15014 if (end
- base
< 3)
15017 /* Look for suffixed mnemonic. */
15019 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
15020 opcode
= hash_find_n (arm_ops_hsh
, base
, affix
- base
);
15021 if (opcode
&& cond
)
15024 switch (opcode
->tag
)
15026 case OT_cinfix3_legacy
:
15027 /* Ignore conditional suffixes matched on infix only mnemonics. */
15031 case OT_cinfix3_deprecated
:
15032 case OT_odd_infix_unc
:
15033 if (!unified_syntax
)
15035 /* else fall through */
15039 case OT_csuf_or_in3
:
15040 inst
.cond
= cond
->value
;
15043 case OT_unconditional
:
15044 case OT_unconditionalF
:
15046 inst
.cond
= cond
->value
;
15049 /* Delayed diagnostic. */
15050 inst
.error
= BAD_COND
;
15051 inst
.cond
= COND_ALWAYS
;
15060 /* Cannot have a usual-position infix on a mnemonic of less than
15061 six characters (five would be a suffix). */
15062 if (end
- base
< 6)
15065 /* Look for infixed mnemonic in the usual position. */
15067 cond
= hash_find_n (arm_cond_hsh
, affix
, 2);
15071 memcpy (save
, affix
, 2);
15072 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
15073 opcode
= hash_find_n (arm_ops_hsh
, base
, (end
- base
) - 2);
15074 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
15075 memcpy (affix
, save
, 2);
15078 && (opcode
->tag
== OT_cinfix3
15079 || opcode
->tag
== OT_cinfix3_deprecated
15080 || opcode
->tag
== OT_csuf_or_in3
15081 || opcode
->tag
== OT_cinfix3_legacy
))
15084 if (warn_on_deprecated
&& unified_syntax
15085 && (opcode
->tag
== OT_cinfix3
15086 || opcode
->tag
== OT_cinfix3_deprecated
))
15087 as_warn (_("conditional infixes are deprecated in unified syntax"));
15089 inst
.cond
= cond
->value
;
15096 /* This function generates an initial IT instruction, leaving its block
15097 virtually open for the new instructions. Eventually,
15098 the mask will be updated by now_it_add_mask () each time
15099 a new instruction needs to be included in the IT block.
15100 Finally, the block is closed with close_automatic_it_block ().
15101 The block closure can be requested either from md_assemble (),
15102 a tencode (), or due to a label hook. */
15105 new_automatic_it_block (int cond
)
15107 now_it
.state
= AUTOMATIC_IT_BLOCK
;
15108 now_it
.mask
= 0x18;
15110 now_it
.block_length
= 1;
15111 mapping_state (MAP_THUMB
);
15112 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
15115 /* Close an automatic IT block.
15116 See comments in new_automatic_it_block (). */
15119 close_automatic_it_block (void)
15121 now_it
.mask
= 0x10;
15122 now_it
.block_length
= 0;
15125 /* Update the mask of the current automatically-generated IT
15126 instruction. See comments in new_automatic_it_block (). */
15129 now_it_add_mask (int cond
)
15131 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
15132 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
15133 | ((bitvalue) << (nbit)))
15134 const int resulting_bit
= (cond
& 1);
15136 now_it
.mask
&= 0xf;
15137 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
15139 (5 - now_it
.block_length
));
15140 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
15142 ((5 - now_it
.block_length
) - 1) );
15143 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
15146 #undef SET_BIT_VALUE
15149 /* The IT blocks handling machinery is accessed through the these functions:
15150 it_fsm_pre_encode () from md_assemble ()
15151 set_it_insn_type () optional, from the tencode functions
15152 set_it_insn_type_last () ditto
15153 in_it_block () ditto
15154 it_fsm_post_encode () from md_assemble ()
15155 force_automatic_it_block_close () from label habdling functions
15158 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
15159 initializing the IT insn type with a generic initial value depending
15160 on the inst.condition.
15161 2) During the tencode function, two things may happen:
15162 a) The tencode function overrides the IT insn type by
15163 calling either set_it_insn_type (type) or set_it_insn_type_last ().
15164 b) The tencode function queries the IT block state by
15165 calling in_it_block () (i.e. to determine narrow/not narrow mode).
15167 Both set_it_insn_type and in_it_block run the internal FSM state
15168 handling function (handle_it_state), because: a) setting the IT insn
15169 type may incur in an invalid state (exiting the function),
15170 and b) querying the state requires the FSM to be updated.
15171 Specifically we want to avoid creating an IT block for conditional
15172 branches, so it_fsm_pre_encode is actually a guess and we can't
15173 determine whether an IT block is required until the tencode () routine
15174 has decided what type of instruction this actually it.
15175 Because of this, if set_it_insn_type and in_it_block have to be used,
15176 set_it_insn_type has to be called first.
15178 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
15179 determines the insn IT type depending on the inst.cond code.
15180 When a tencode () routine encodes an instruction that can be
15181 either outside an IT block, or, in the case of being inside, has to be
15182 the last one, set_it_insn_type_last () will determine the proper
15183 IT instruction type based on the inst.cond code. Otherwise,
15184 set_it_insn_type can be called for overriding that logic or
15185 for covering other cases.
15187 Calling handle_it_state () may not transition the IT block state to
15188 OUTSIDE_IT_BLOCK immediatelly, since the (current) state could be
15189 still queried. Instead, if the FSM determines that the state should
15190 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
15191 after the tencode () function: that's what it_fsm_post_encode () does.
15193 Since in_it_block () calls the state handling function to get an
15194 updated state, an error may occur (due to invalid insns combination).
15195 In that case, inst.error is set.
15196 Therefore, inst.error has to be checked after the execution of
15197 the tencode () routine.
15199 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
15200 any pending state change (if any) that didn't take place in
15201 handle_it_state () as explained above. */
15204 it_fsm_pre_encode (void)
15206 if (inst
.cond
!= COND_ALWAYS
)
15207 inst
.it_insn_type
= INSIDE_IT_INSN
;
15209 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
15211 now_it
.state_handled
= 0;
15214 /* IT state FSM handling function. */
15217 handle_it_state (void)
15219 now_it
.state_handled
= 1;
15221 switch (now_it
.state
)
15223 case OUTSIDE_IT_BLOCK
:
15224 switch (inst
.it_insn_type
)
15226 case OUTSIDE_IT_INSN
:
15229 case INSIDE_IT_INSN
:
15230 case INSIDE_IT_LAST_INSN
:
15231 if (thumb_mode
== 0)
15234 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
15235 as_tsktsk (_("Warning: conditional outside an IT block"\
15240 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
15241 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_arch_t2
))
15243 /* Automatically generate the IT instruction. */
15244 new_automatic_it_block (inst
.cond
);
15245 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
15246 close_automatic_it_block ();
15250 inst
.error
= BAD_OUT_IT
;
15256 case IF_INSIDE_IT_LAST_INSN
:
15257 case NEUTRAL_IT_INSN
:
15261 now_it
.state
= MANUAL_IT_BLOCK
;
15262 now_it
.block_length
= 0;
15267 case AUTOMATIC_IT_BLOCK
:
15268 /* Three things may happen now:
15269 a) We should increment current it block size;
15270 b) We should close current it block (closing insn or 4 insns);
15271 c) We should close current it block and start a new one (due
15272 to incompatible conditions or
15273 4 insns-length block reached). */
15275 switch (inst
.it_insn_type
)
15277 case OUTSIDE_IT_INSN
:
15278 /* The closure of the block shall happen immediatelly,
15279 so any in_it_block () call reports the block as closed. */
15280 force_automatic_it_block_close ();
15283 case INSIDE_IT_INSN
:
15284 case INSIDE_IT_LAST_INSN
:
15285 case IF_INSIDE_IT_LAST_INSN
:
15286 now_it
.block_length
++;
15288 if (now_it
.block_length
> 4
15289 || !now_it_compatible (inst
.cond
))
15291 force_automatic_it_block_close ();
15292 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
15293 new_automatic_it_block (inst
.cond
);
15297 now_it_add_mask (inst
.cond
);
15300 if (now_it
.state
== AUTOMATIC_IT_BLOCK
15301 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
15302 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
15303 close_automatic_it_block ();
15306 case NEUTRAL_IT_INSN
:
15307 now_it
.block_length
++;
15309 if (now_it
.block_length
> 4)
15310 force_automatic_it_block_close ();
15312 now_it_add_mask (now_it
.cc
& 1);
15316 close_automatic_it_block ();
15317 now_it
.state
= MANUAL_IT_BLOCK
;
15322 case MANUAL_IT_BLOCK
:
15324 /* Check conditional suffixes. */
15325 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
15328 now_it
.mask
&= 0x1f;
15329 is_last
= (now_it
.mask
== 0x10);
15331 switch (inst
.it_insn_type
)
15333 case OUTSIDE_IT_INSN
:
15334 inst
.error
= BAD_NOT_IT
;
15337 case INSIDE_IT_INSN
:
15338 if (cond
!= inst
.cond
)
15340 inst
.error
= BAD_IT_COND
;
15345 case INSIDE_IT_LAST_INSN
:
15346 case IF_INSIDE_IT_LAST_INSN
:
15347 if (cond
!= inst
.cond
)
15349 inst
.error
= BAD_IT_COND
;
15354 inst
.error
= BAD_BRANCH
;
15359 case NEUTRAL_IT_INSN
:
15360 /* The BKPT instruction is unconditional even in an IT block. */
15364 inst
.error
= BAD_IT_IT
;
15375 it_fsm_post_encode (void)
15379 if (!now_it
.state_handled
)
15380 handle_it_state ();
15382 is_last
= (now_it
.mask
== 0x10);
15385 now_it
.state
= OUTSIDE_IT_BLOCK
;
15391 force_automatic_it_block_close (void)
15393 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
15395 close_automatic_it_block ();
15396 now_it
.state
= OUTSIDE_IT_BLOCK
;
15404 if (!now_it
.state_handled
)
15405 handle_it_state ();
15407 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
15411 md_assemble (char *str
)
15414 const struct asm_opcode
* opcode
;
15416 /* Align the previous label if needed. */
15417 if (last_label_seen
!= NULL
)
15419 symbol_set_frag (last_label_seen
, frag_now
);
15420 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
15421 S_SET_SEGMENT (last_label_seen
, now_seg
);
15424 memset (&inst
, '\0', sizeof (inst
));
15425 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
15427 opcode
= opcode_lookup (&p
);
15430 /* It wasn't an instruction, but it might be a register alias of
15431 the form alias .req reg, or a Neon .dn/.qn directive. */
15432 if (! create_register_alias (str
, p
)
15433 && ! create_neon_reg_alias (str
, p
))
15434 as_bad (_("bad instruction `%s'"), str
);
15439 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
15440 as_warn (_("s suffix on comparison instruction is deprecated"));
15442 /* The value which unconditional instructions should have in place of the
15443 condition field. */
15444 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
15448 arm_feature_set variant
;
15450 variant
= cpu_variant
;
15451 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
15452 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
15453 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
15454 /* Check that this instruction is supported for this CPU. */
15455 if (!opcode
->tvariant
15456 || (thumb_mode
== 1
15457 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
15459 as_bad (_("selected processor does not support `%s'"), str
);
15462 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
15463 && opcode
->tencode
!= do_t_branch
)
15465 as_bad (_("Thumb does not support conditional execution"));
15469 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
))
15471 if (opcode
->tencode
!= do_t_blx
&& opcode
->tencode
!= do_t_branch23
15472 && !(ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_msr
)
15473 || ARM_CPU_HAS_FEATURE(*opcode
->tvariant
, arm_ext_barrier
)))
15475 /* Two things are addressed here.
15476 1) Implicit require narrow instructions on Thumb-1.
15477 This avoids relaxation accidentally introducing Thumb-2
15479 2) Reject wide instructions in non Thumb-2 cores. */
15480 if (inst
.size_req
== 0)
15482 else if (inst
.size_req
== 4)
15484 as_bad (_("selected processor does not support `%s'"), str
);
15490 inst
.instruction
= opcode
->tvalue
;
15492 if (!parse_operands (p
, opcode
->operands
))
15494 /* Prepare the it_insn_type for those encodings that don't set
15496 it_fsm_pre_encode ();
15498 opcode
->tencode ();
15500 it_fsm_post_encode ();
15503 if (!(inst
.error
|| inst
.relax
))
15505 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
15506 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
15507 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
15509 as_bad (_("cannot honor width suffix -- `%s'"), str
);
15514 /* Something has gone badly wrong if we try to relax a fixed size
15516 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
15518 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
15519 *opcode
->tvariant
);
15520 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
15521 set those bits when Thumb-2 32-bit instructions are seen. ie.
15522 anything other than bl/blx and v6-M instructions.
15523 This is overly pessimistic for relaxable instructions. */
15524 if (((inst
.size
== 4 && (inst
.instruction
& 0xf800e800) != 0xf000e800)
15526 && !(ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
15527 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
)))
15528 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
15533 mapping_state (MAP_THUMB
);
15536 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
15540 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
15541 is_bx
= (opcode
->aencode
== do_bx
);
15543 /* Check that this instruction is supported for this CPU. */
15544 if (!(is_bx
&& fix_v4bx
)
15545 && !(opcode
->avariant
&&
15546 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
15548 as_bad (_("selected processor does not support `%s'"), str
);
15553 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
15557 inst
.instruction
= opcode
->avalue
;
15558 if (opcode
->tag
== OT_unconditionalF
)
15559 inst
.instruction
|= 0xF << 28;
15561 inst
.instruction
|= inst
.cond
<< 28;
15562 inst
.size
= INSN_SIZE
;
15563 if (!parse_operands (p
, opcode
->operands
))
15565 it_fsm_pre_encode ();
15566 opcode
->aencode ();
15567 it_fsm_post_encode ();
15569 /* Arm mode bx is marked as both v4T and v5 because it's still required
15570 on a hypothetical non-thumb v5 core. */
15572 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
15574 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
15575 *opcode
->avariant
);
15578 mapping_state (MAP_ARM
);
15583 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
15591 check_it_blocks_finished (void)
15596 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
15597 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
15598 == MANUAL_IT_BLOCK
)
15600 as_warn (_("section '%s' finished with an open IT block."),
15604 if (now_it
.state
== MANUAL_IT_BLOCK
)
15605 as_warn (_("file finished with an open IT block."));
15609 /* Various frobbings of labels and their addresses. */
15612 arm_start_line_hook (void)
15614 last_label_seen
= NULL
;
15618 arm_frob_label (symbolS
* sym
)
15620 last_label_seen
= sym
;
15622 ARM_SET_THUMB (sym
, thumb_mode
);
15624 #if defined OBJ_COFF || defined OBJ_ELF
15625 ARM_SET_INTERWORK (sym
, support_interwork
);
15628 force_automatic_it_block_close ();
15630 /* Note - do not allow local symbols (.Lxxx) to be labelled
15631 as Thumb functions. This is because these labels, whilst
15632 they exist inside Thumb code, are not the entry points for
15633 possible ARM->Thumb calls. Also, these labels can be used
15634 as part of a computed goto or switch statement. eg gcc
15635 can generate code that looks like this:
15637 ldr r2, [pc, .Laaa]
15647 The first instruction loads the address of the jump table.
15648 The second instruction converts a table index into a byte offset.
15649 The third instruction gets the jump address out of the table.
15650 The fourth instruction performs the jump.
15652 If the address stored at .Laaa is that of a symbol which has the
15653 Thumb_Func bit set, then the linker will arrange for this address
15654 to have the bottom bit set, which in turn would mean that the
15655 address computation performed by the third instruction would end
15656 up with the bottom bit set. Since the ARM is capable of unaligned
15657 word loads, the instruction would then load the incorrect address
15658 out of the jump table, and chaos would ensue. */
15659 if (label_is_thumb_function_name
15660 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
15661 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
15663 /* When the address of a Thumb function is taken the bottom
15664 bit of that address should be set. This will allow
15665 interworking between Arm and Thumb functions to work
15668 THUMB_SET_FUNC (sym
, 1);
15670 label_is_thumb_function_name
= FALSE
;
15673 dwarf2_emit_label (sym
);
15677 arm_data_in_code (void)
15679 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
15681 *input_line_pointer
= '/';
15682 input_line_pointer
+= 5;
15683 *input_line_pointer
= 0;
15691 arm_canonicalize_symbol_name (char * name
)
15695 if (thumb_mode
&& (len
= strlen (name
)) > 5
15696 && streq (name
+ len
- 5, "/data"))
15697 *(name
+ len
- 5) = 0;
15702 /* Table of all register names defined by default. The user can
15703 define additional names with .req. Note that all register names
15704 should appear in both upper and lowercase variants. Some registers
15705 also have mixed-case names. */
15707 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
15708 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
15709 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
15710 #define REGSET(p,t) \
15711 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
15712 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
15713 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
15714 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
15715 #define REGSETH(p,t) \
15716 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
15717 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
15718 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
15719 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
15720 #define REGSET2(p,t) \
15721 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
15722 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
15723 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
15724 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
15726 static const struct reg_entry reg_names
[] =
15728 /* ARM integer registers. */
15729 REGSET(r
, RN
), REGSET(R
, RN
),
15731 /* ATPCS synonyms. */
15732 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
15733 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
15734 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
15736 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
15737 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
15738 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
15740 /* Well-known aliases. */
15741 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
15742 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
15744 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
15745 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
15747 /* Coprocessor numbers. */
15748 REGSET(p
, CP
), REGSET(P
, CP
),
15750 /* Coprocessor register numbers. The "cr" variants are for backward
15752 REGSET(c
, CN
), REGSET(C
, CN
),
15753 REGSET(cr
, CN
), REGSET(CR
, CN
),
15755 /* FPA registers. */
15756 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
15757 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
15759 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
15760 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
15762 /* VFP SP registers. */
15763 REGSET(s
,VFS
), REGSET(S
,VFS
),
15764 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
15766 /* VFP DP Registers. */
15767 REGSET(d
,VFD
), REGSET(D
,VFD
),
15768 /* Extra Neon DP registers. */
15769 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
15771 /* Neon QP registers. */
15772 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
15774 /* VFP control registers. */
15775 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
15776 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
15777 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
15778 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
15779 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
15780 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
15782 /* Maverick DSP coprocessor registers. */
15783 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
15784 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
15786 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
15787 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
15788 REGDEF(dspsc
,0,DSPSC
),
15790 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
15791 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
15792 REGDEF(DSPSC
,0,DSPSC
),
15794 /* iWMMXt data registers - p0, c0-15. */
15795 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
15797 /* iWMMXt control registers - p1, c0-3. */
15798 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
15799 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
15800 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
15801 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
15803 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
15804 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
15805 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
15806 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
15807 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
15809 /* XScale accumulator registers. */
15810 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
15816 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
15817 within psr_required_here. */
15818 static const struct asm_psr psrs
[] =
15820 /* Backward compatibility notation. Note that "all" is no longer
15821 truly all possible PSR bits. */
15822 {"all", PSR_c
| PSR_f
},
15826 /* Individual flags. */
15831 /* Combinations of flags. */
15832 {"fs", PSR_f
| PSR_s
},
15833 {"fx", PSR_f
| PSR_x
},
15834 {"fc", PSR_f
| PSR_c
},
15835 {"sf", PSR_s
| PSR_f
},
15836 {"sx", PSR_s
| PSR_x
},
15837 {"sc", PSR_s
| PSR_c
},
15838 {"xf", PSR_x
| PSR_f
},
15839 {"xs", PSR_x
| PSR_s
},
15840 {"xc", PSR_x
| PSR_c
},
15841 {"cf", PSR_c
| PSR_f
},
15842 {"cs", PSR_c
| PSR_s
},
15843 {"cx", PSR_c
| PSR_x
},
15844 {"fsx", PSR_f
| PSR_s
| PSR_x
},
15845 {"fsc", PSR_f
| PSR_s
| PSR_c
},
15846 {"fxs", PSR_f
| PSR_x
| PSR_s
},
15847 {"fxc", PSR_f
| PSR_x
| PSR_c
},
15848 {"fcs", PSR_f
| PSR_c
| PSR_s
},
15849 {"fcx", PSR_f
| PSR_c
| PSR_x
},
15850 {"sfx", PSR_s
| PSR_f
| PSR_x
},
15851 {"sfc", PSR_s
| PSR_f
| PSR_c
},
15852 {"sxf", PSR_s
| PSR_x
| PSR_f
},
15853 {"sxc", PSR_s
| PSR_x
| PSR_c
},
15854 {"scf", PSR_s
| PSR_c
| PSR_f
},
15855 {"scx", PSR_s
| PSR_c
| PSR_x
},
15856 {"xfs", PSR_x
| PSR_f
| PSR_s
},
15857 {"xfc", PSR_x
| PSR_f
| PSR_c
},
15858 {"xsf", PSR_x
| PSR_s
| PSR_f
},
15859 {"xsc", PSR_x
| PSR_s
| PSR_c
},
15860 {"xcf", PSR_x
| PSR_c
| PSR_f
},
15861 {"xcs", PSR_x
| PSR_c
| PSR_s
},
15862 {"cfs", PSR_c
| PSR_f
| PSR_s
},
15863 {"cfx", PSR_c
| PSR_f
| PSR_x
},
15864 {"csf", PSR_c
| PSR_s
| PSR_f
},
15865 {"csx", PSR_c
| PSR_s
| PSR_x
},
15866 {"cxf", PSR_c
| PSR_x
| PSR_f
},
15867 {"cxs", PSR_c
| PSR_x
| PSR_s
},
15868 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
15869 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
15870 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
15871 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
15872 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
15873 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
15874 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
15875 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
15876 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
15877 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
15878 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
15879 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
15880 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
15881 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
15882 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
15883 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
15884 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
15885 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
15886 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
15887 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
15888 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
15889 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
15890 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
15891 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
15894 /* Table of V7M psr names. */
15895 static const struct asm_psr v7m_psrs
[] =
15897 {"apsr", 0 }, {"APSR", 0 },
15898 {"iapsr", 1 }, {"IAPSR", 1 },
15899 {"eapsr", 2 }, {"EAPSR", 2 },
15900 {"psr", 3 }, {"PSR", 3 },
15901 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
15902 {"ipsr", 5 }, {"IPSR", 5 },
15903 {"epsr", 6 }, {"EPSR", 6 },
15904 {"iepsr", 7 }, {"IEPSR", 7 },
15905 {"msp", 8 }, {"MSP", 8 },
15906 {"psp", 9 }, {"PSP", 9 },
15907 {"primask", 16}, {"PRIMASK", 16},
15908 {"basepri", 17}, {"BASEPRI", 17},
15909 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
15910 {"faultmask", 19}, {"FAULTMASK", 19},
15911 {"control", 20}, {"CONTROL", 20}
15914 /* Table of all shift-in-operand names. */
15915 static const struct asm_shift_name shift_names
[] =
15917 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
15918 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
15919 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
15920 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
15921 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
15922 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
15925 /* Table of all explicit relocation names. */
15927 static struct reloc_entry reloc_names
[] =
15929 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
15930 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
15931 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
15932 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
15933 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
15934 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
15935 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
15936 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
15937 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
15938 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
15939 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
}
15943 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
15944 static const struct asm_cond conds
[] =
15948 {"cs", 0x2}, {"hs", 0x2},
15949 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
15963 static struct asm_barrier_opt barrier_opt_names
[] =
15971 /* Table of ARM-format instructions. */
15973 /* Macros for gluing together operand strings. N.B. In all cases
15974 other than OPS0, the trailing OP_stop comes from default
15975 zero-initialization of the unspecified elements of the array. */
15976 #define OPS0() { OP_stop, }
15977 #define OPS1(a) { OP_##a, }
15978 #define OPS2(a,b) { OP_##a,OP_##b, }
15979 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
15980 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
15981 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
15982 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
15984 /* These macros abstract out the exact format of the mnemonic table and
15985 save some repeated characters. */
15987 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
15988 #define TxCE(mnem, op, top, nops, ops, ae, te) \
15989 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
15990 THUMB_VARIANT, do_##ae, do_##te }
15992 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
15993 a T_MNEM_xyz enumerator. */
15994 #define TCE(mnem, aop, top, nops, ops, ae, te) \
15995 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
15996 #define tCE(mnem, aop, top, nops, ops, ae, te) \
15997 TxCE (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
15999 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
16000 infix after the third character. */
16001 #define TxC3(mnem, op, top, nops, ops, ae, te) \
16002 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
16003 THUMB_VARIANT, do_##ae, do_##te }
16004 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
16005 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
16006 THUMB_VARIANT, do_##ae, do_##te }
16007 #define TC3(mnem, aop, top, nops, ops, ae, te) \
16008 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
16009 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
16010 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
16011 #define tC3(mnem, aop, top, nops, ops, ae, te) \
16012 TxC3 (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
16013 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
16014 TxC3w (mnem, aop, T_MNEM_##top, nops, ops, ae, te)
16016 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
16017 appear in the condition table. */
16018 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
16019 { #m1 #m2 #m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
16020 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
16022 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
16023 TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
16024 TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
16025 TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
16026 TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
16027 TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
16028 TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
16029 TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
16030 TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
16031 TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
16032 TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
16033 TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
16034 TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
16035 TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
16036 TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
16037 TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
16038 TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
16039 TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
16040 TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
16041 TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
16043 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
16044 TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
16045 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
16046 TxCM (m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
16048 /* Mnemonic that cannot be conditionalized. The ARM condition-code
16049 field is still 0xE. Many of the Thumb variants can be executed
16050 conditionally, so this is checked separately. */
16051 #define TUE(mnem, op, top, nops, ops, ae, te) \
16052 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
16053 THUMB_VARIANT, do_##ae, do_##te }
16055 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
16056 condition code field. */
16057 #define TUF(mnem, op, top, nops, ops, ae, te) \
16058 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
16059 THUMB_VARIANT, do_##ae, do_##te }
16061 /* ARM-only variants of all the above. */
16062 #define CE(mnem, op, nops, ops, ae) \
16063 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16065 #define C3(mnem, op, nops, ops, ae) \
16066 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16068 /* Legacy mnemonics that always have conditional infix after the third
16070 #define CL(mnem, op, nops, ops, ae) \
16071 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
16072 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16074 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
16075 #define cCE(mnem, op, nops, ops, ae) \
16076 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16078 /* Legacy coprocessor instructions where conditional infix and conditional
16079 suffix are ambiguous. For consistency this includes all FPA instructions,
16080 not just the potentially ambiguous ones. */
16081 #define cCL(mnem, op, nops, ops, ae) \
16082 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
16083 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16085 /* Coprocessor, takes either a suffix or a position-3 infix
16086 (for an FPA corner case). */
16087 #define C3E(mnem, op, nops, ops, ae) \
16088 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
16089 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16091 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
16092 { #m1 #m2 #m3, OPS##nops ops, \
16093 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (#m1) - 1, \
16094 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16096 #define CM(m1, m2, op, nops, ops, ae) \
16097 xCM_ (m1, , m2, op, nops, ops, ae), \
16098 xCM_ (m1, eq, m2, op, nops, ops, ae), \
16099 xCM_ (m1, ne, m2, op, nops, ops, ae), \
16100 xCM_ (m1, cs, m2, op, nops, ops, ae), \
16101 xCM_ (m1, hs, m2, op, nops, ops, ae), \
16102 xCM_ (m1, cc, m2, op, nops, ops, ae), \
16103 xCM_ (m1, ul, m2, op, nops, ops, ae), \
16104 xCM_ (m1, lo, m2, op, nops, ops, ae), \
16105 xCM_ (m1, mi, m2, op, nops, ops, ae), \
16106 xCM_ (m1, pl, m2, op, nops, ops, ae), \
16107 xCM_ (m1, vs, m2, op, nops, ops, ae), \
16108 xCM_ (m1, vc, m2, op, nops, ops, ae), \
16109 xCM_ (m1, hi, m2, op, nops, ops, ae), \
16110 xCM_ (m1, ls, m2, op, nops, ops, ae), \
16111 xCM_ (m1, ge, m2, op, nops, ops, ae), \
16112 xCM_ (m1, lt, m2, op, nops, ops, ae), \
16113 xCM_ (m1, gt, m2, op, nops, ops, ae), \
16114 xCM_ (m1, le, m2, op, nops, ops, ae), \
16115 xCM_ (m1, al, m2, op, nops, ops, ae)
16117 #define UE(mnem, op, nops, ops, ae) \
16118 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16120 #define UF(mnem, op, nops, ops, ae) \
16121 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16123 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
16124 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
16125 use the same encoding function for each. */
16126 #define NUF(mnem, op, nops, ops, enc) \
16127 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
16128 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16130 /* Neon data processing, version which indirects through neon_enc_tab for
16131 the various overloaded versions of opcodes. */
16132 #define nUF(mnem, op, nops, ops, enc) \
16133 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
16134 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16136 /* Neon insn with conditional suffix for the ARM version, non-overloaded
16138 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
16139 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
16140 THUMB_VARIANT, do_##enc, do_##enc }
16142 #define NCE(mnem, op, nops, ops, enc) \
16143 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
16145 #define NCEF(mnem, op, nops, ops, enc) \
16146 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16148 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
16149 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
16150 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
16151 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16153 #define nCE(mnem, op, nops, ops, enc) \
16154 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
16156 #define nCEF(mnem, op, nops, ops, enc) \
16157 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16161 /* Thumb-only, unconditional. */
16162 #define UT(mnem, op, nops, ops, te) TUE (mnem, 0, op, nops, ops, 0, te)
16164 static const struct asm_opcode insns
[] =
16166 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
16167 #define THUMB_VARIANT &arm_ext_v4t
16168 tCE(and, 0000000, and, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16169 tC3(ands
, 0100000, ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16170 tCE(eor
, 0200000, eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16171 tC3(eors
, 0300000, eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16172 tCE(sub
, 0400000, sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16173 tC3(subs
, 0500000, subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16174 tCE(add
, 0800000, add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16175 tC3(adds
, 0900000, adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16176 tCE(adc
, 0a00000
, adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16177 tC3(adcs
, 0b00000, adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16178 tCE(sbc
, 0c00000
, sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16179 tC3(sbcs
, 0d00000
, sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16180 tCE(orr
, 1800000, orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16181 tC3(orrs
, 1900000, orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16182 tCE(bic
, 1c00000
, bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16183 tC3(bics
, 1d00000
, bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16185 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
16186 for setting PSR flag bits. They are obsolete in V6 and do not
16187 have Thumb equivalents. */
16188 tCE(tst
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16189 tC3w(tsts
, 1100000, tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16190 CL(tstp
, 110f000
, 2, (RR
, SH
), cmp
),
16191 tCE(cmp
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16192 tC3w(cmps
, 1500000, cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
16193 CL(cmpp
, 150f000
, 2, (RR
, SH
), cmp
),
16194 tCE(cmn
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16195 tC3w(cmns
, 1700000, cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16196 CL(cmnp
, 170f000
, 2, (RR
, SH
), cmp
),
16198 tCE(mov
, 1a00000
, mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16199 tC3(movs
, 1b00000
, movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16200 tCE(mvn
, 1e00000
, mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16201 tC3(mvns
, 1f00000
, mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16203 tCE(ldr
, 4100000, ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16204 tC3(ldrb
, 4500000, ldrb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16205 tCE(str
, 4000000, str
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16206 tC3(strb
, 4400000, strb
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16208 tCE(stm
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16209 tC3(stmia
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16210 tC3(stmea
, 8800000, stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16211 tCE(ldm
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16212 tC3(ldmia
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16213 tC3(ldmfd
, 8900000, ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16215 TCE(swi
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16216 TCE(svc
, f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16217 tCE(b
, a000000
, b
, 1, (EXPr
), branch
, t_branch
),
16218 TCE(bl
, b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
16221 tCE(adr
, 28f0000
, adr
, 2, (RR
, EXP
), adr
, t_adr
),
16222 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
16223 tCE(nop
, 1a00000
, nop
, 1, (oI255c
), nop
, t_nop
),
16225 /* Thumb-compatibility pseudo ops. */
16226 tCE(lsl
, 1a00000
, lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16227 tC3(lsls
, 1b00000
, lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16228 tCE(lsr
, 1a00020
, lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16229 tC3(lsrs
, 1b00020
, lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16230 tCE(asr
, 1a00040
, asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16231 tC3(asrs
, 1b00040
, asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16232 tCE(ror
, 1a00060
, ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16233 tC3(rors
, 1b00060
, rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
16234 tCE(neg
, 2600000, neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
16235 tC3(negs
, 2700000, negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
16236 tCE(push
, 92d0000
, push
, 1, (REGLST
), push_pop
, t_push_pop
),
16237 tCE(pop
, 8bd0000
, pop
, 1, (REGLST
), push_pop
, t_push_pop
),
16239 /* These may simplify to neg. */
16240 TCE(rsb
, 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16241 TC3(rsbs
, 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
16243 #undef THUMB_VARIANT
16244 #define THUMB_VARIANT & arm_ext_v6
16246 TCE(cpy
, 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
16248 /* V1 instructions with no Thumb analogue prior to V6T2. */
16249 #undef THUMB_VARIANT
16250 #define THUMB_VARIANT & arm_ext_v6t2
16252 TCE(teq
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16253 TC3w(teqs
, 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
16254 CL(teqp
, 130f000
, 2, (RR
, SH
), cmp
),
16256 TC3(ldrt
, 4300000, f8500e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16257 TC3(ldrbt
, 4700000, f8100e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16258 TC3(strt
, 4200000, f8400e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16259 TC3(strbt
, 4600000, f8000e00
, 2, (RR
, ADDR
), ldstt
, t_ldstt
),
16261 TC3(stmdb
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16262 TC3(stmfd
, 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16264 TC3(ldmdb
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16265 TC3(ldmea
, 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16267 /* V1 instructions with no Thumb analogue at all. */
16268 CE(rsc
, 0e00000
, 3, (RR
, oRR
, SH
), arit
),
16269 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
16271 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16272 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
16273 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16274 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
16275 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16276 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
16277 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16278 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
16281 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v4t

 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),

 /* Generic coprocessor instructions. */
 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
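 /* Operand order for the generic coprocessor moves mirrors the architecture,
    e.g. "mcr p15, 0, r0, c1, c0, 0" transfers r0 to coprocessor 15 with
    opcode1 0, CRn c1, CRm c0 and opcode2 0.  */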
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_msr

 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v4t

 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v4t_5
 /* ARM Architecture 4T. */
 /* Note: bx (and blx) are required on V5, even if the processor does
    not support Thumb. */
 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v5t

 /* Note: blx has 2 variants; the .value coded here is for
    BLX(2).  Only this variant has conditional execution. */
 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),

 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
 TC3(ldrd, 00000d0, e8500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
 TC3(strd, 00000f0, e8400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),

 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6

 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar), ssat, t_ssat),
 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar), usat, t_usat),

 /* ARM V6 not included in V7M (eg. integer SIMD). */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6_notm

 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qasx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QASX. */
 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsax, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QSAX. */
 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(sasx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SASX. */
 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shasx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHASX. */
 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsax, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHSAX. */
 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssax, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SSAX. */
 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uasx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UASX. */
 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhasx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHASX. */
 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsax, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHSAX. */
 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqasx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQASX. */
 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsax, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQSAX. */
 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usax, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for USAX. */
 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfeib, 9900a00, 1, (RRw), rfe),
 UF(rfeda, 8100a00, 1, (RRw), rfe),
 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
 UF(rfefa, 9900a00, 1, (RRw), rfe),
 UF(rfeea, 8100a00, 1, (RRw), rfe),
 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
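 /* rfefd/rfefa/rfeea/rfeed are the stack-convention spellings; note that they
    reuse the same opcode values as the rfeia/rfeib/rfeda/rfedb forms above.  */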
 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TUF(srsia, 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
 TUF(srsdb, 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v6k
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6k

 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6_notm

 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),

#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v6t2

 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v6z
 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v6t2
 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),

 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),

 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),

 UT(cbnz, b900, 2, (RR, EXP), t_cbz),
 UT(cbz, b100, 2, (RR, EXP), t_cbz),

 /* ARM does not really have an IT instruction, so always allow it.
    The opcode is copied from Thumb in order to allow warnings in
    -mimplicit-it=[never | arm] modes. */
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v1
 TUE(it, bf08, bf08, 1, (COND), it, t_it),
 TUE(itt, bf0c, bf0c, 1, (COND), it, t_it),
 TUE(ite, bf04, bf04, 1, (COND), it, t_it),
 TUE(ittt, bf0e, bf0e, 1, (COND), it, t_it),
 TUE(itet, bf06, bf06, 1, (COND), it, t_it),
 TUE(itte, bf0a, bf0a, 1, (COND), it, t_it),
 TUE(itee, bf02, bf02, 1, (COND), it, t_it),
 TUE(itttt, bf0f, bf0f, 1, (COND), it, t_it),
 TUE(itett, bf07, bf07, 1, (COND), it, t_it),
 TUE(ittet, bf0b, bf0b, 1, (COND), it, t_it),
 TUE(iteet, bf03, bf03, 1, (COND), it, t_it),
 TUE(ittte, bf0d, bf0d, 1, (COND), it, t_it),
 TUE(itete, bf05, bf05, 1, (COND), it, t_it),
 TUE(ittee, bf09, bf09, 1, (COND), it, t_it),
 TUE(iteee, bf01, bf01, 1, (COND), it, t_it),
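 /* Example: "itte eq" opens a three-instruction IT block in which the first
    two instructions execute if the EQ condition holds and the third executes
    if it does not.  */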
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
 TC3(rrx, 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
 TC3(rrxs, 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),

 /* Thumb-2 only instructions. */
#undef ARM_VARIANT
#define ARM_VARIANT NULL
 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE(orn, 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
 TCE(orns, 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),

 /* Thumb-2 hardware division instructions (R and M profiles only). */
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_div

 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
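 /* Usage: "sdiv r0, r1, r2" computes r0 = r1 / r2 (signed), with udiv as the
    unsigned counterpart; the ARM opcode field is 0 because only the Thumb-2
    encodings are handled here.  */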
 /* ARM V6M/V7 instructions. */
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_barrier
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_barrier

 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
 /* ARM V7 instructions. */
#undef ARM_VARIANT
#define ARM_VARIANT & arm_ext_v7
#undef THUMB_VARIANT
#define THUMB_VARIANT & arm_ext_v7

 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
 cCE(wfs, e200110, 1, (RR), rd),
 cCE(rfs, e300110, 1, (RR), rd),
 cCE(wfc, e400110, 1, (RR), rd),
 cCE(rfc, e500110, 1, (RR), rd),

 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),

 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),

 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),

 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),

 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),

 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),

 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),

 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),

 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),

 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),

 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),

 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),

 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),

 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),

 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),

 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),

 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),

 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),

 cCL(flts, e000110, 2, (RF, RR), rn_rd),
 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
 cCL(flte, e080110, 2, (RF, RR), rn_rd),
 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits. */
 cCE(fix, e100110, 2, (RR, RF), rd_rm),
 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
 cCL(fixez, e100170, 2, (RR, RF), rd_rm),

 /* Instructions that were new with the real FPA, call them V2. */
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_fpa_ext_v2
 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
 /* Moves and type conversions. */
 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
 cCE(fmstat, ef1fa10, 0, (), noargs),
 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),

 /* Memory operations. */
 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),

 /* Monadic operations. */
 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),

 /* Dyadic operations. */
 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),

 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
 /* Moves and type conversions. */
 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),

 /* Memory operations. */
 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),

 /* Monadic operations. */
 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),

 /* Dyadic operations. */
 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),

 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_vfp_ext_v2
 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),

 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks. */
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_vfp_ext_v1xd
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_ext_v1xd

 /* These mnemonics are unique to VFP. */
 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),

 /* Mnemonics shared by Neon and VFP. */
 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),

 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),

 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),

 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),

 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
 nCEF(vcvtb, vcvt, 2, (RVS, RVS), neon_cvtb),
 nCEF(vcvtt, vcvt, 2, (RVS, RVS), neon_cvtt),

 /* NOTE: All VMOV encoding is special-cased! */
 NCE(vmov, 0, 1, (VMOV), neon_mov),
 NCE(vmovq, 0, 1, (VMOV), neon_mov),
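 /* vmov/vmovq cover several distinct forms (core register <-> S/D register
    moves, lane moves, and immediate loads), which is why a single VMOV
    operand shape is parsed here and the encoder sorts out the cases.  */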
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_neon_ext_v1
#undef ARM_VARIANT
#define ARM_VARIANT & fpu_neon_ext_v1
17284 /* Data processing with three registers of the same length. */
17285 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17286 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
17287 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
17288 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17289 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17290 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17291 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17292 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17293 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17294 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17295 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17296 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17297 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17298 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17299 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17300 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17301 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17302 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17303 /* If not immediate, fall back to neon_dyadic_i64_su.
17304 shl_imm should accept I8 I16 I32 I64,
17305 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17306 nUF(vshl
, vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
17307 nUF(vshlq
, vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
17308 nUF(vqshl
, vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
17309 nUF(vqshlq
, vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
17310 /* Logic ops, types optional & ignored. */
17311 nUF(vand
, vand
, 2, (RNDQ
, NILO
), neon_logic
),
17312 nUF(vandq
, vand
, 2, (RNQ
, NILO
), neon_logic
),
17313 nUF(vbic
, vbic
, 2, (RNDQ
, NILO
), neon_logic
),
17314 nUF(vbicq
, vbic
, 2, (RNQ
, NILO
), neon_logic
),
17315 nUF(vorr
, vorr
, 2, (RNDQ
, NILO
), neon_logic
),
17316 nUF(vorrq
, vorr
, 2, (RNQ
, NILO
), neon_logic
),
17317 nUF(vorn
, vorn
, 2, (RNDQ
, NILO
), neon_logic
),
17318 nUF(vornq
, vorn
, 2, (RNQ
, NILO
), neon_logic
),
17319 nUF(veor
, veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
17320 nUF(veorq
, veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
17321 /* Bitfield ops, untyped. */
17322 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17323 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17324 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17325 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17326 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17327 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17328 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17329 nUF(vabd
, vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17330 nUF(vabdq
, vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17331 nUF(vmax
, vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17332 nUF(vmaxq
, vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17333 nUF(vmin
, vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17334 nUF(vminq
, vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17335 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17336 back to neon_dyadic_if_su. */
17337 nUF(vcge
, vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17338 nUF(vcgeq
, vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17339 nUF(vcgt
, vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17340 nUF(vcgtq
, vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17341 nUF(vclt
, vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17342 nUF(vcltq
, vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17343 nUF(vcle
, vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17344 nUF(vcleq
, vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17345 /* Comparison. Type I8 I16 I32 F32. */
17346 nUF(vceq
, vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
17347 nUF(vceqq
, vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
17348 /* As above, D registers only. */
17349 nUF(vpmax
, vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17350 nUF(vpmin
, vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17351 /* Int and float variants, signedness unimportant. */
17352 nUF(vmlaq
, vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17353 nUF(vmlsq
, vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17354 nUF(vpadd
, vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
17355 /* Add/sub take types I8 I16 I32 I64 F32. */
17356 nUF(vaddq
, vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17357 nUF(vsubq
, vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17358 /* vtst takes sizes 8, 16, 32. */
17359 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
17360 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
17361 /* VMUL takes I8 I16 I32 F32 P8. */
17362 nUF(vmulq
, vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
17363 /* VQD{R}MULH takes S16 S32. */
17364 nUF(vqdmulh
, vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17365 nUF(vqdmulhq
, vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17366 nUF(vqrdmulh
, vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17367 nUF(vqrdmulhq
, vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17368 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17369 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17370 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17371 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17372 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17373 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17374 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17375 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17376 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17377 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17378 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17379 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
  /* Two address, int/float.  Types S8 S16 S32 F32.  */
 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
  /* Data processing with two registers and a shift amount.  */
  /* Right shifts, and variants with rounding.
     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
  /* Shift and insert.  Sizes accepted 8 16 32 64.  */
 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
  /* Right shift immediate, saturating & narrowing, with rounding variants.
     Types accepted S16 S32 S64 U16 U32 U64.  */
 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
  /* As above, unsigned.  Types accepted S16 S32 S64.  */
 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
  /* Right shift narrowing.  Types accepted I16 I32 I64.  */
 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
  /* Special case.  Types S8 S16 S32 U8 U16 U32.  Handles max shift variant.  */
 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
  /* CVT with optional immediate for fixed-point variant.  */
 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),

 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
  /* Data processing, three registers of different lengths.  */
  /* Dyadic, long insns.  Types S8 S16 S32 U8 U16 U32.  */
 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
  /* If not scalar, fall back to neon_dyadic_long.
     Vector types as above, scalar types S16 S32 U16 U32.  */
 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
  /* Dyadic, widening insns.  Types S8 S16 S32 U8 U16 U32.  */
 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
  /* Dyadic, narrowing insns.  Types I16 I32 I64.  */
 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
  /* Saturating doubling multiplies.  Types S16 S32.  */
 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
  /* VMULL.  Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
     S16 S32 U16 U32.  */
 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
  /* Extract.  Size 8.  */
 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
  /* Two registers, miscellaneous.  */
  /* Reverse.  Sizes 8 16 32 (must be < size in opcode).  */
 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
  /* Vector replicate.  Sizes 8 16 32.  */
 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
  /* VMOVL.  Types S8 S16 S32 U8 U16 U32.  */
 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
  /* VMOVN.  Types I16 I32 I64.  */
 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
  /* VQMOVN.  Types S16 S32 S64 U16 U32 U64.  */
 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
  /* VQMOVUN.  Types S16 S32 S64.  */
 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
  /* VZIP / VUZP.  Sizes 8 16 32.  */
 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
  /* VQABS / VQNEG.  Types S8 S16 S32.  */
 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
  /* Pairwise, lengthening.  Types S8 S16 S32 U8 U16 U32.  */
 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
  /* Reciprocal estimates.  Types U32 F32.  */
 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
  /* VCLS.  Types S8 S16 S32.  */
 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
  /* VCLZ.  Types I8 I16 I32.  */
 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
  /* VCNT.  Size 8.  */
 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
  /* Two address, untyped.  */
 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
  /* VTRN.  Sizes 8 16 32.  */
 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),

  /* Table lookup.  Size 8.  */
 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
#define ARM_VARIANT & fpu_vfp_v3_or_neon_ext

  /* Neon element/structure load/store.  */
 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
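  /* Illustrative note (not part of the original comments): neon_ldx_stx
     handles the whole VLDn/VSTn family, so these few entries cover forms as
     different as "vld1.8 {d0}, [r0]", "vld2.16 {d0, d1}, [r1]!" and
     "vst4.32 {d0[1], d1[1], d2[1], d3[1]}, [r2]"; the register-list and
     addressing-mode parsing is done by the NSTRLST and ADDR operand types.  */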
#undef THUMB_VARIANT
#define THUMB_VARIANT & fpu_vfp_ext_v3
#define ARM_VARIANT & fpu_vfp_ext_v3

 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
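  /* Illustrative note (not part of the original comments): the I255 operand
     of fconsts/fconstd is the 8-bit *encoded* VFP constant rather than a
     floating-point value (so "fconsts s0, #0x70" loads 1.0), and the I16z/I32
     operands of the fixed-point conversions give the number of fraction
     bits, e.g. "fsltos s0, #16" treats s0 as 16.16 fixed point.  */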
#undef THUMB_VARIANT
#define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions.  */

 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
#define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */

 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z), iwmmxt_wrwrwr_or_imm5),
 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckehub, e0000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuh, e4000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehuw, e8000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsb, e2000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsh, e6000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckehsw, ea000c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckelub, e0000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluh, e4000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckeluw, e8000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsb, e2000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsh, e6000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckelsw, ea000e0, 2, (RIWR, RIWR), rd_rn),
 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
#define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */

 cCE(torvscb, e12f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvsch, e52f190, 1, (RR), iwmmxt_tandorc),
 cCE(torvscw, e92f190, 1, (RR), iwmmxt_tandorc),
 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
#define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions.  */

 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
 cCE(cftruncs32, e1005c0, 2, (RMFX, RMF), rd_rn),
 cCE(cftruncd32, e1005e0, 2, (RMFX, RMD), rd_rn),
 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),

#undef THUMB_VARIANT
/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating-point numbers are put out as a series
   of LITTLENUMS (shorts, here at least).  */

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
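/* Worked example (informal, not from the original source): on a
   little-endian target, md_number_to_chars (buf, 0x12345678, 4) stores the
   bytes 0x78 0x56 0x34 0x12 at buf[0..3]; a big-endian target stores
   0x12 0x34 0x56 0x78 instead.  */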
static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    {
      while (n--)
        {
          result <<= 8;
          result |= (*where++ & 255);
        }
    }
  else
    {
      while (n--)
        {
          result <<= 8;
          result |= (where[n] & 255);
        }
    }

  return result;
}
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
                               segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 2;
  return 2;
}

/* Convert a machine dependent frag.  */
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
  unsigned long insn;
  unsigned long old_op;

  buf = fragp->fr_literal + fragp->fr_fix;

  old_op = bfd_get_16 (abfd, buf);
  if (fragp->fr_symbol)
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
      exp.X_op = O_constant;
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;

    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
              insn |= (old_op & 0x700) << 4;
              insn |= (old_op & 7) << 12;
              insn |= (old_op & 0x38) << 13;
          insn |= 0x00000c00;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
        reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
      pc_rel = (opcode == T_MNEM_ldr_pc2);

      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;
        exp.X_add_number -= 4;

      if (fragp->fr_var == 4)
          int r0off = (opcode == T_MNEM_mov
                       || opcode == T_MNEM_movs) ? 0 : 8;
          insn = THUMB_OP32 (opcode);
          insn = (insn & 0xe1ffffff) | 0x10000000;
          insn |= (old_op & 0x700) << r0off;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_IMM;

      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;

      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf00) << 14;
          put_thumb32_insn (buf, insn);
          reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
        reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;

    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
          /* ??? Choose between add and addw.  */
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          put_thumb32_insn (buf, insn);
          if (opcode == T_MNEM_add_pc)
            reloc_type = BFD_RELOC_ARM_T32_IMM12;
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;

      if (fragp->fr_var == 4)
          insn = THUMB_OP32 (opcode);
          insn |= (old_op & 0xf0) << 4;
          insn |= (old_op & 0xf) << 16;
          put_thumb32_insn (buf, insn);
          if (insn & (1 << 20))
            reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
            reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
        reloc_type = BFD_RELOC_ARM_THUMB_ADD;

  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.  */

relax_immediate (fragS *fragp, int size, int shift)
  /* ??? Should be able to do better than this.  */
  if (fragp->fr_symbol)

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & ~mask)
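/* Worked example (informal, not from the original source): for the Thumb
   "ldr Rt, [sp, #imm]" form this is called with SIZE = 8 and SHIFT = 2, so
   MASK covers offsets 0..1020 in steps of 4; an offset of 1024 or more, or
   any offset that is not a multiple of 4, forces the 32-bit encoding.  */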
/* Get the address of a symbol during relaxation.  */

relaxed_symbol_addr (fragS *fragp, long stretch)
  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
        || sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */
      && sym_frag->relax_marker != fragp->relax_marker)

      /* Adjust stretch for any alignment frag.  Note that if we have
         been expanding the earlier code, the symbol may be
         defined in what appears to be an earlier frag.  FIXME:
         This doesn't handle the fr_subtype field, which specifies
         a maximum number of bytes to skip when doing an
         alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
          if (f->fr_type == rs_align || f->fr_type == rs_align_code)
              stretch = - ((- stretch)
                           & ~ ((1 << (int) f->fr_offset) - 1));
              stretch &= ~ ((1 << (int) f->fr_offset) - 1);
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */

relax_adr (fragS *fragp, asection *sec, long stretch)
  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val < 0 || val > 1020)
/* Return the size of a relaxable add/sub immediate instruction.  */

relax_addsub (fragS *fragp, asection *sec)
  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16 (sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  return relax_immediate (fragp, 3, 0);
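/* Informal note (not from the original source): the (op & 0xf) ==
   ((op >> 4) & 0xf) test checks that the source and destination registers of
   the narrow add/sub are the same; only then can the 8-bit immediate form
   "add Rd, #imm8" be used, otherwise the 3-bit "add Rd, Rn, #imm3" form is
   the narrow fallback.  */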
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.  */

relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))

  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix + 4;

  /* Offset is a signed value *2.  */
  if (val >= limit || val < -limit)
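/* Worked example (informal, not from the original source): for an
   unconditional Thumb branch BITS is 11, so LIMIT is 2048 and the narrow
   "b" form is kept only while the byte displacement from PC+4 stays within
   about +/-2 KiB; conditional branches are relaxed with BITS = 8, giving a
   reach of about +/-256 bytes.  */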
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

arm_relax_frag (asection *sec, fragS *fragp, long stretch)
  oldsize = fragp->fr_var;
  switch (fragp->fr_subtype)
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      newsize = relax_immediate (fragp, 5, 2);
      newsize = relax_immediate (fragp, 5, 1);
      newsize = relax_immediate (fragp, 5, 0);
      newsize = relax_adr (fragp, sec, stretch);
      newsize = relax_immediate (fragp, 8, 0);
      newsize = relax_branch (fragp, sec, 11, stretch);
      newsize = relax_branch (fragp, sec, 8, stretch);
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      newsize = relax_addsub (fragp, sec);

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
      md_convert_frag (sec->owner, sec, fragp);

  return newsize - oldsize;
/* Round up a section size to the appropriate boundary.  */

md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
      /* For a.out, force the section size to be aligned.  If we don't do
         this, BFD will align it for us, but it will not write out the
         final bytes of the section.  This may be a bug in BFD, but it is
         easier to fix it here since that is how the other a.out targets
         work.  */
      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
/* This is called from HANDLE_ALIGN in write.c.  Fill in the contents
   of an rs_align_code fragment.  */

arm_handle_align (fragS * fragP)
  static char const arm_noop[2][2][4] =
      {0x00, 0x00, 0xa0, 0xe1},  /* LE */
      {0xe1, 0xa0, 0x00, 0x00},  /* BE */
      {0x00, 0xf0, 0x20, 0xe3},  /* LE */
      {0xe3, 0x20, 0xf0, 0x00},  /* BE */
  static char const thumb_noop[2][2][2] =
      {0xc0, 0x46},  /* LE */
      {0x46, 0xc0},  /* BE */
      {0x00, 0xbf},  /* LE */
      {0xbf, 0x00},  /* BE */
  static char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */

  unsigned bytes, fix, noop_size;
  const char *narrow_noop = NULL;

  if (fragP->fr_type != rs_align_code)

  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
          narrow_noop = thumb_noop[1][target_big_endian];
          noop = wide_thumb_noop[target_big_endian];
        noop = thumb_noop[0][target_big_endian];
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
                     [target_big_endian];

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
      fix = bytes & (noop_size - 1);
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
      memset (p, 0, fix);

  if (bytes & noop_size)
      /* Insert a narrow noop.  */
      memcpy (p, narrow_noop, noop_size);
      bytes -= noop_size;

  /* Use wide noops for the remainder.  */
  while (bytes >= noop_size)
      memcpy (p, noop, noop_size);
      bytes -= noop_size;

  fragP->fr_fix += fix;
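/* Worked example (informal, not from the original source): padding Thumb-2
   code, an odd leading residue smaller than the nop size is zero-filled and
   covered by a data mapping symbol, a spare halfword is covered by one
   narrow nop (0xbf00, stored here as {0x00, 0xbf} on little-endian), and the
   rest is filled with the 4-byte Thumb-2 nop (0xf3af 0x8000).  */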
/* Called from md_do_align.  Used to create an alignment
   frag in a code section.  */

arm_frag_align_code (int n, int max)
  /* We assume that there will never be a requirement
     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
              _("alignments greater than %d bytes not supported in .text sections."),
              MAX_MEM_FOR_RS_ALIGN_CODE + 1);
      as_fatal ("%s", err_msg);

  p = frag_var (rs_align_code,
                MAX_MEM_FOR_RS_ALIGN_CODE,
                (relax_substateT) max,
/* Perform target specific initialisation of a frag.
   Note - despite the name this initialisation is not done when the frag
   is created, but only when its type is assigned.  A frag can be created
   and used a long time before its type is set, so beware of assuming that
   this initialisation is performed first.  */

arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode;

#else /* OBJ_ELF is defined.  */

arm_init_frag (fragS * fragP, int max_chars)
  /* If the current ARM vs THUMB mode has not already
     been recorded into this frag then do so now.  */
  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
      fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

      /* Record a mapping symbol for alignment frags.  We will delete this
         later if the alignment ends up empty.  */
      switch (fragP->fr_type)
        case rs_align_test:
          mapping_state_2 (MAP_DATA, max_chars);
        case rs_align_code:
          mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
/* When we change sections we need to issue a new mapping symbol.  */

arm_elf_change_section (void)
  /* Link an unlinked unwind index table section to the .text section.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;

arm_elf_section_type (const char * str, size_t len)
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;
/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.  */

flush_pending_unwind (void)
  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
    add_unwind_adjustsp (offset);

/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

add_unwind_opcode (valueT op, int length)
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  if (unwind.opcode_count + length > unwind.opcode_alloc)
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
        unwind.opcodes = xrealloc (unwind.opcodes,
                                   unwind.opcode_alloc);
      else
        unwind.opcodes = xmalloc (unwind.opcode_alloc);
  unwind.opcodes[unwind.opcode_count] = op & 0xff;
  unwind.opcode_count++;

/* Add unwind opcodes to adjust the stack pointer.  */

add_unwind_adjustsp (offsetT offset)
  if (offset > 0x200)
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
         remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
        add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
          bytes[n] = o & 0x7f;
      /* Add the insn.  */
        add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
  else if (offset > 0x100)
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
  else if (offset > 0)
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
  else if (offset < 0)
      while (offset > 0x100)
          add_unwind_opcode (0x7f, 1);
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
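/* Worked example (informal, not from the original source): an sp adjustment
   of 0x300 bytes takes the long form, o = (0x300 - 0x204) >> 2 = 0x3f, whose
   uleb128 encoding is the single byte 0x3f, so the opcode stream read
   forwards is 0xb2 0x3f; an adjustment of 0x40 fits the short form and
   becomes the single opcode (0x40 - 4) >> 2 = 0x0f.  */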
/* Finish the list of unwind opcodes for this function.  */

finish_unwind_opcodes (void)
  if (unwind.fp_used)
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
  else
    flush_pending_unwind ();

/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

start_unwind_section (const segT text_seg, int idx)
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t sec_name_len;

      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))

  if (strncmp (text_name, ".gnu.linkonce.t.",
               strlen (".gnu.linkonce.t.")) == 0)
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
          as_bad (_("Group section `%s' has no group signature"),
                  segment_name (text_seg));
          ignore_rest_of_line ();
      flags |= SHF_GROUP;

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
    elf_linked_to_section (now_seg) = text_seg;
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

create_unwind_entry (int have_data)
  /* The current word of data.  */
  /* The number of bytes left in this word.  */

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
      if (unwind.personality_index == -2)
          as_bad (_("handlerdata in cantunwind frame"));
          return 1; /* EXIDX_CANTUNWIND.  */

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
          if (unwind.opcode_count > 3)
            unwind.personality_index = 1;
          else
            unwind.personality_index = 0;

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
          if (unwind.opcode_count > 3)
            as_bad (_("too many unwind opcodes for personality routine 0"));

          /* All the data is inline in the index table.  */
          while (unwind.opcode_count > 0)
              unwind.opcode_count--;
              data = (data << 8) | unwind.opcodes[unwind.opcode_count];

          /* Pad with "finish" opcodes.  */
            data = (data << 8) | 0xb0;

          /* We get two opcodes "free" in the first word.  */
          size = unwind.opcode_count - 2;
      /* An extra byte is required for the opcode count.  */
      size = unwind.opcode_count + 1;

  size = (size + 3) >> 2;
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
               BFD_RELOC_ARM_PREL31);

      /* Set the first byte to the number of additional words.  */
      /* ABI defined personality routines.  */
      /* Three opcode bytes are packed into the first word.  */
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      /* Should never happen.  */

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
          md_number_to_chars (ptr, data, 4);
      unwind.opcode_count--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];

  /* Finish off the last word.  */
      /* Pad with "finish" opcodes.  */
        data = (data << 8) | 0xb0;
      md_number_to_chars (ptr, data, 4);

  /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
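/* Worked example (informal, not from the original source): a frame that
   needs only personality routine 0 and at most three one-byte opcodes is
   described entirely inside the index table; a single 0x41 opcode
   ("vsp = vsp - 8") padded with two 0xb0 "finish" opcodes yields the inline
   word 0x8041b0b0, whose leading 0x80 marks a compact entry using
   __aeabi_unwind_cpp_pr0.  */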
/* Initialize the DWARF-2 unwind information for this procedure.  */

tc_arm_frame_initial_instructions (void)
  cfi_add_CFA_def_cfa (REG_SP, 0);
#endif /* OBJ_ELF */

/* Convert REGNAME to a DWARF-2 register number.  */

tc_arm_regname_to_dw2regnum (char *regname)
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);

tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
  expr.X_op = O_secrel;
  expr.X_add_symbol = symbol;
  expr.X_add_number = 0;
  emit_expr (&expr, size);

/* MD interface: Symbol and relocation handling.  */
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */
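/* Worked example (informal, not from the original source): for an ARM
   branch assembled at address 0x1000 whose target resolves locally to
   0x1010, the pipeline-compensated base is 0x1008, so the value fed into the
   fixup is 0x1010 - 0x1008 = 8 (stored in the instruction as 8 >> 2); the
   Thumb equivalents use a +4 base, and Thumb PC-relative loads additionally
   clear the low two bits of the base.  */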
18997 md_pcrel_from_section (fixS
* fixP
, segT seg
)
18999 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
19001 /* If this is pc-relative and we are going to emit a relocation
19002 then we just want to put out any pipeline compensation that the linker
19003 will need. Otherwise we want to use the calculated base.
19004 For WinCE we skip the bias for externals as well, since this
19005 is how the MS ARM-CE assembler behaves and we want to be compatible. */
19007 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19008 || (arm_force_relocation (fixP
)
19010 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19016 switch (fixP
->fx_r_type
)
19018 /* PC relative addressing on the Thumb is slightly odd as the
19019 bottom two bits of the PC are forced to zero for the
19020 calculation. This happens *after* application of the
19021 pipeline offset. However, Thumb adrl already adjusts for
19022 this, so we need not do it again. */
19023 case BFD_RELOC_ARM_THUMB_ADD
:
19026 case BFD_RELOC_ARM_THUMB_OFFSET
:
19027 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
19028 case BFD_RELOC_ARM_T32_ADD_PC12
:
19029 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
19030 return (base
+ 4) & ~3;
      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (fixP->fx_addsy
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
         zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
          && THUMB_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
         loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
          && THUMB_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
         discovered the value of a symbol, or the address of the frag involved
         we must account for the offset by +8, as the OS loader will never see the reloc.
         see fixup_segment() in write.c
         The S_IS_EXTERNAL test handles the case of global symbols.
         Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
          && fixP->fx_addsy != NULL
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
        return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
         branches, the Windows CE loader *does* expect the relocation
         to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;

      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
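/* Worked example of the pipeline bias handled above (an illustrative
   plain-C sketch; nothing in the assembler calls it).  In ARM state the
   CPU reads PC as the instruction address plus 8, in Thumb state plus 4,
   and Thumb PC-relative loads additionally force the low two bits of the
   base to zero.  */

static unsigned long
pcrel_base_sketch (unsigned long insn_addr, int thumb, int pc_load)
{
  unsigned long base = insn_addr + (thumb ? 4 : 8);

  if (thumb && pc_load)
    base &= ~3UL;               /* e.g. 0x1002 + 4 = 0x1006 -> 0x1004.  */

  return base;
}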
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in the symbol table"));

          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, & zero_address_frag);
        }

      return GOT_symbol;
    }
#endif

  return 0;
}
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  */

static unsigned int
validate_immediate_twopart (unsigned int val,
                            unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
        if (a & 0xff00)
          {
            if (a & ~0xffff)
              continue;
            * highpart = (a >> 8) | ((i + 24) << 7);
          }
        else if (a & 0xff0000)
          {
            if (a & 0xff000000)
              continue;
            * highpart = (a >> 16) | ((i + 16) << 7);
          }
        else
          {
            gas_assert (a & 0xff000000);
            * highpart = (a >> 24) | ((i + 8) << 7);
          }

        return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
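/* Illustrative plain-C sketch of the split performed above (the helper
   name is an assumption; unlike validate_immediate_twopart it returns the
   raw addend values rather than encoded operand fields, and only covers
   the simplest of the three byte-position cases).  An ARM data-processing
   immediate is an 8-bit value rotated right by an even amount, so a
   constant such as 0x10004, which has no single encoding, can still be
   built from two immediates: 0x00004 + 0x10000.  */

static int
split_arm_immediate_sketch (unsigned int val,
                            unsigned int *lopart, unsigned int *hipart)
{
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    {
      /* rotate_left (val, i), written out in plain C.  */
      unsigned int a = (val << i) | (i ? (val >> (32 - i)) : 0);

      /* Both non-zero bytes must land in the low 16 bits of the
         rotated value for this simple case to apply.  */
      if ((a & 0xff) != 0 && (a & ~0xffffu) == 0)
        {
          unsigned int lo = a & 0x00ff;
          unsigned int hi = a & 0xff00;

          /* Rotate each byte back to its original position; each part
             is then itself a valid 8-bit-rotated ARM immediate.  */
          *lopart = i ? ((lo >> i) | (lo << (32 - i))) : lo;
          *hipart = i ? ((hi >> i) | (hi << (32 - i))) : hi;
          return 1;
        }
    }

  return 0;                     /* Needs more than two immediates.  */
}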
static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
        MOV <-> MVN
        AND <-> BIC
        ADC <-> SBC
   by inverting the second operand, and
        ADD <-> SUB
        CMP <-> CMN
   by negating the second operand.  */

static int
negate_data_op (unsigned long * instruction,
                unsigned long value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:             /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:             /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:             /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:             /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:             /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
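/* Worked example of the opcode-flipping trick above (illustrative only):
   "add r0, r1, #-1" has no valid rotated-immediate encoding for -1, but
   negating the constant and switching ADD to SUB gives "sub r0, r1, #1";
   similarly "mov r0, #-1" becomes "mvn r0, #0", since inverting the
   constant (~-1 == 0) pairs with the MOV <-> MVN swap.  */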
19272 /* Like negate_data_op, but for Thumb-2. */
19274 static unsigned int
19275 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
19279 unsigned int negated
, inverted
;
19281 negated
= encode_thumb32_immediate (-value
);
19282 inverted
= encode_thumb32_immediate (~value
);
19284 rd
= (*instruction
>> 8) & 0xf;
19285 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
19288 /* ADD <-> SUB. Includes CMP <-> CMN. */
19289 case T2_OPCODE_SUB
:
19290 new_inst
= T2_OPCODE_ADD
;
19294 case T2_OPCODE_ADD
:
19295 new_inst
= T2_OPCODE_SUB
;
19299 /* ORR <-> ORN. Includes MOV <-> MVN. */
19300 case T2_OPCODE_ORR
:
19301 new_inst
= T2_OPCODE_ORN
;
19305 case T2_OPCODE_ORN
:
19306 new_inst
= T2_OPCODE_ORR
;
19310 /* AND <-> BIC. TST has no inverted equivalent. */
19311 case T2_OPCODE_AND
:
19312 new_inst
= T2_OPCODE_BIC
;
19319 case T2_OPCODE_BIC
:
19320 new_inst
= T2_OPCODE_AND
;
19325 case T2_OPCODE_ADC
:
19326 new_inst
= T2_OPCODE_SBC
;
19330 case T2_OPCODE_SBC
:
19331 new_inst
= T2_OPCODE_ADC
;
19335 /* We cannot do anything. */
19340 if (value
== (unsigned int)FAIL
)
19343 *instruction
&= T2_OPCODE_MASK
;
19344 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
/* Read a 32-bit thumb instruction from buf.  */

static unsigned long
get_thumb32_insn (char * buf)
{
  unsigned long insn;
  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);

  return insn;
}
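/* Companion sketch to get_thumb32_insn (plain C, illustrative only; the
   assembler itself writes instructions back with md_number_to_chars or
   put_thumb32_insn).  A 32-bit Thumb-2 instruction is stored as two
   16-bit halfwords with the most significant halfword first, e.g. "bl"
   with a zero offset is 0xf000f800, stored as 0xf000 then 0xf800.  */

static void
split_thumb32_insn_sketch (unsigned long insn,
                           unsigned int *first_hw, unsigned int *second_hw)
{
  *first_hw = (insn >> 16) & 0xffff;    /* 0xf000 in the example.  */
  *second_hw = insn & 0xffff;           /* 0xf800 in the example.  */
}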
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a thumb function.  */

bfd_boolean
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return TRUE;
    }

  /* Process as normal.  */
  return FALSE;
}
void
md_apply_fix (fixS * fixP,
              valueT * valP,
              segT seg)
{
  offsetT value = * valP;
  offsetT newval;
  unsigned int newimm;
  unsigned long temp;
  int sign;
  char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* On a 64-bit host, silently truncate 'value' to 32 bits for
     consistency with the behaviour on 32-bit hosts.  Remember value
     for emit_reloc.  */
  value &= 0xffffffff;
  value ^= 0x80000000;
  value -= 0x80000000;

  fixP->fx_addnumber = value;

  /* Same treatment for fixP->fx_offset.  */
  fixP->fx_offset &= 0xffffffff;
  fixP->fx_offset ^= 0x80000000;
  fixP->fx_offset -= 0x80000000;

  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;
19425 case BFD_RELOC_ARM_IMMEDIATE
:
19426 /* We claim that this fixup has been processed here,
19427 even if in fact we generate an error because we do
19428 not have a reloc for it, so tc_gen_reloc will reject it. */
19432 && ! S_IS_DEFINED (fixP
->fx_addsy
))
19434 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19435 _("undefined symbol %s used as an immediate value"),
19436 S_GET_NAME (fixP
->fx_addsy
));
19441 && S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19443 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19444 _("symbol %s is in a different section"),
19445 S_GET_NAME (fixP
->fx_addsy
));
19449 newimm
= encode_arm_immediate (value
);
19450 temp
= md_chars_to_number (buf
, INSN_SIZE
);
19452 /* If the instruction will fail, see if we can fix things up by
19453 changing the opcode. */
19454 if (newimm
== (unsigned int) FAIL
19455 && (newimm
= negate_data_op (&temp
, value
)) == (unsigned int) FAIL
)
19457 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19458 _("invalid constant (%lx) after fixup"),
19459 (unsigned long) value
);
19463 newimm
|= (temp
& 0xfffff000);
19464 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
19467 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
19469 unsigned int highpart
= 0;
19470 unsigned int newinsn
= 0xe1a00000; /* nop. */
19473 && ! S_IS_DEFINED (fixP
->fx_addsy
))
19475 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19476 _("undefined symbol %s used as an immediate value"),
19477 S_GET_NAME (fixP
->fx_addsy
));
19482 && S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
19484 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19485 _("symbol %s is in a different section"),
19486 S_GET_NAME (fixP
->fx_addsy
));
19490 newimm
= encode_arm_immediate (value
);
19491 temp
= md_chars_to_number (buf
, INSN_SIZE
);
19493 /* If the instruction will fail, see if we can fix things up by
19494 changing the opcode. */
19495 if (newimm
== (unsigned int) FAIL
19496 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
19498 /* No ? OK - try using two ADD instructions to generate
19500 newimm
= validate_immediate_twopart (value
, & highpart
);
19502 /* Yes - then make sure that the second instruction is
19504 if (newimm
!= (unsigned int) FAIL
)
19506 /* Still No ? Try using a negated value. */
19507 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
19508 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
19509 /* Otherwise - give up. */
19512 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19513 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
19518 /* Replace the first operand in the 2nd instruction (which
19519 is the PC) with the destination register. We have
19520 already added in the PC in the first instruction and we
19521 do not want to do it again. */
19522 newinsn
&= ~ 0xf0000;
19523 newinsn
|= ((newinsn
& 0x0f000) << 4);
19526 newimm
|= (temp
& 0xfffff000);
19527 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
19529 highpart
|= (newinsn
& 0xfffff000);
19530 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
19534 case BFD_RELOC_ARM_OFFSET_IMM
:
19535 if (!fixP
->fx_done
&& seg
->use_rela_p
)
19538 case BFD_RELOC_ARM_LITERAL
:
19544 if (validate_offset_imm (value
, 0) == FAIL
)
19546 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
19547 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19548 _("invalid literal constant: pool needs to be closer"));
19550 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19551 _("bad immediate value for offset (%ld)"),
19556 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19557 newval
&= 0xff7ff000;
19558 newval
|= value
| (sign
? INDEX_UP
: 0);
19559 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19562 case BFD_RELOC_ARM_OFFSET_IMM8
:
19563 case BFD_RELOC_ARM_HWLITERAL
:
19569 if (validate_offset_imm (value
, 1) == FAIL
)
19571 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
19572 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19573 _("invalid literal constant: pool needs to be closer"));
19575 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
19580 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19581 newval
&= 0xff7ff0f0;
19582 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
19583 md_number_to_chars (buf
, newval
, INSN_SIZE
);
    case BFD_RELOC_ARM_T32_OFFSET_U8:
      if (value < 0 || value > 1020 || value % 4 != 0)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("bad immediate value for offset (%ld)"), (long) value);
      value /= 4;

      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      newval |= value;
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
      break;
19597 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
19598 /* This is a complicated relocation used for all varieties of Thumb32
19599 load/store instruction with immediate offset:
19601 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
19602 *4, optional writeback(W)
19603 (doubleword load/store)
19605 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
19606 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
19607 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
19608 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
19609 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
19611 Uppercase letters indicate bits that are already encoded at
19612 this point. Lowercase letters are our problem. For the
19613 second block of instructions, the secondary opcode nybble
19614 (bits 8..11) is present, and bit 23 is zero, even if this is
19615 a PC-relative operation. */
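      /* Worked example (illustrative only): for a positive 12-bit
         PC-relative load such as "ldr.w r0, [pc, #260]" the second block
         above applies, so bit 23 (the u bit) is set and the offset 260
         (0x104) goes into the low twelve "i" bits; for a negative offset
         the u bit is cleared and the magnitude is stored instead.  */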
19616 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19618 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
19620 if ((newval
& 0xf0000000) == 0xe0000000)
19622 /* Doubleword load/store: 8-bit offset, scaled by 4. */
19624 newval
|= (1 << 23);
19627 if (value
% 4 != 0)
19629 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19630 _("offset not a multiple of 4"));
19636 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19637 _("offset out of range"));
19642 else if ((newval
& 0x000f0000) == 0x000f0000)
19644 /* PC-relative, 12-bit offset. */
19646 newval
|= (1 << 23);
19651 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19652 _("offset out of range"));
19657 else if ((newval
& 0x00000100) == 0x00000100)
19659 /* Writeback: 8-bit, +/- offset. */
19661 newval
|= (1 << 9);
19666 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19667 _("offset out of range"));
19672 else if ((newval
& 0x00000f00) == 0x00000e00)
19674 /* T-instruction: positive 8-bit offset. */
19675 if (value
< 0 || value
> 0xff)
19677 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19678 _("offset out of range"));
19686 /* Positive 12-bit or negative 8-bit offset. */
19690 newval
|= (1 << 23);
19700 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19701 _("offset out of range"));
19708 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
19709 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
19712 case BFD_RELOC_ARM_SHIFT_IMM
:
19713 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19714 if (((unsigned long) value
) > 32
19716 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
19718 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19719 _("shift expression is too large"));
19724 /* Shifts of zero must be done as lsl. */
19726 else if (value
== 32)
19728 newval
&= 0xfffff07f;
19729 newval
|= (value
& 0x1f) << 7;
19730 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19733 case BFD_RELOC_ARM_T32_IMMEDIATE
:
19734 case BFD_RELOC_ARM_T32_ADD_IMM
:
19735 case BFD_RELOC_ARM_T32_IMM12
:
19736 case BFD_RELOC_ARM_T32_ADD_PC12
:
19737 /* We claim that this fixup has been processed here,
19738 even if in fact we generate an error because we do
19739 not have a reloc for it, so tc_gen_reloc will reject it. */
19743 && ! S_IS_DEFINED (fixP
->fx_addsy
))
19745 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19746 _("undefined symbol %s used as an immediate value"),
19747 S_GET_NAME (fixP
->fx_addsy
));
19751 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19753 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
19756 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
19757 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
19759 newimm
= encode_thumb32_immediate (value
);
19760 if (newimm
== (unsigned int) FAIL
)
19761 newimm
= thumb32_negate_data_op (&newval
, value
);
19763 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
19764 && newimm
== (unsigned int) FAIL
)
19766 /* Turn add/sum into addw/subw. */
19767 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
19768 newval
= (newval
& 0xfeffffff) | 0x02000000;
19770 /* 12 bit immediate for addw/subw. */
19774 newval
^= 0x00a00000;
19777 newimm
= (unsigned int) FAIL
;
19782 if (newimm
== (unsigned int)FAIL
)
19784 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19785 _("invalid constant (%lx) after fixup"),
19786 (unsigned long) value
);
19790 newval
|= (newimm
& 0x800) << 15;
19791 newval
|= (newimm
& 0x700) << 4;
19792 newval
|= (newimm
& 0x0ff);
19794 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
19795 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
19798 case BFD_RELOC_ARM_SMC
:
19799 if (((unsigned long) value
) > 0xffff)
19800 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19801 _("invalid smc expression"));
19802 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19803 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
19804 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19807 case BFD_RELOC_ARM_SWI
:
19808 if (fixP
->tc_fix_data
!= 0)
19810 if (((unsigned long) value
) > 0xff)
19811 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19812 _("invalid swi expression"));
19813 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19815 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19819 if (((unsigned long) value
) > 0x00ffffff)
19820 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19821 _("invalid swi expression"));
19822 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19824 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19828 case BFD_RELOC_ARM_MULTI
:
19829 if (((unsigned long) value
) > 0xffff)
19830 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19831 _("invalid expression in load/store multiple"));
19832 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
19833 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19837 case BFD_RELOC_ARM_PCREL_CALL
:
19839 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
19841 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19842 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19843 && THUMB_IS_FUNC (fixP
->fx_addsy
))
19844 /* Flip the bl to blx. This is a simple flip
19845 bit here because we generate PCREL_CALL for
19846 unconditional bls. */
19848 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19849 newval
= newval
| 0x10000000;
19850 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19856 goto arm_branch_common
;
19858 case BFD_RELOC_ARM_PCREL_JUMP
:
19859 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
19861 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19862 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19863 && THUMB_IS_FUNC (fixP
->fx_addsy
))
19865 /* This would map to a bl<cond>, b<cond>,
19866 b<always> to a Thumb function. We
19867 need to force a relocation for this particular
19869 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19873 case BFD_RELOC_ARM_PLT32
:
19875 case BFD_RELOC_ARM_PCREL_BRANCH
:
19877 goto arm_branch_common
;
19879 case BFD_RELOC_ARM_PCREL_BLX
:
19882 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
19884 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19885 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19886 && ARM_IS_FUNC (fixP
->fx_addsy
))
19888 /* Flip the blx to a bl and warn. */
19889 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
19890 newval
= 0xeb000000;
19891 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
19892 _("blx to '%s' an ARM ISA state function changed to bl"),
19894 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19900 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
19901 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
      /* We are going to store value (shifted right by two) in the
         instruction, in a 24 bit, signed field.  Bits 26 through 32 must be
         either all clear or all set, and bit 0 must be clear.  For B/BL
         bit 1 must also be clear.  */
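      /* Worked example (illustrative only): a branch 1 MiB forward has
         value = 0x100000, so 0x100000 >> 2 = 0x40000 fits the 24-bit
         field and (value & 0xfe000000) is zero, i.e. in range.  A branch
         of +64 MiB (0x4000000) is rejected below because bits 25..31 are
         neither all clear nor all set.  */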
19910 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19911 _("misaligned branch destination"));
19912 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
19913 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
19914 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19915 _("branch out of range"));
19917 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19919 newval
= md_chars_to_number (buf
, INSN_SIZE
);
19920 newval
|= (value
>> 2) & 0x00ffffff;
19921 /* Set the H bit on BLX instructions. */
19925 newval
|= 0x01000000;
19927 newval
&= ~0x01000000;
19929 md_number_to_chars (buf
, newval
, INSN_SIZE
);
19933 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
19934 /* CBZ can only branch forward. */
19936 /* Attempts to use CBZ to branch to the next instruction
19937 (which, strictly speaking, are prohibited) will be turned into
19940 FIXME: It may be better to remove the instruction completely and
19941 perform relaxation. */
19944 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19945 newval
= 0xbf00; /* NOP encoding T1 */
19946 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19951 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19952 _("branch out of range"));
19954 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19956 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19957 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
19958 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19963 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
19964 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
19965 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19966 _("branch out of range"));
19968 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19970 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19971 newval
|= (value
& 0x1ff) >> 1;
19972 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19976 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
19977 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
19978 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
19979 _("branch out of range"));
19981 if (fixP
->fx_done
|| !seg
->use_rela_p
)
19983 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
19984 newval
|= (value
& 0xfff) >> 1;
19985 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
19989 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
19991 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
19992 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
19993 && S_IS_DEFINED (fixP
->fx_addsy
)
19994 && ARM_IS_FUNC (fixP
->fx_addsy
)
19995 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
19997 /* Force a relocation for a branch 20 bits wide. */
20000 if ((value
& ~0x1fffff) && ((value
& ~0x1fffff) != ~0x1fffff))
20001 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20002 _("conditional branch out of range"));
20004 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20007 addressT S
, J1
, J2
, lo
, hi
;
20009 S
= (value
& 0x00100000) >> 20;
20010 J2
= (value
& 0x00080000) >> 19;
20011 J1
= (value
& 0x00040000) >> 18;
20012 hi
= (value
& 0x0003f000) >> 12;
20013 lo
= (value
& 0x00000ffe) >> 1;
20015 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20016 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20017 newval
|= (S
<< 10) | hi
;
20018 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
20019 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20020 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
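          /* Worked example (illustrative only): a conditional branch of
             +0x1000 bytes gives S = 0, J1 = J2 = 0, hi = 1 and lo = 0, so
             the first halfword's imm6 field is 1 and the second halfword's
             imm11 stays 0; S, J1 and J2 extend the reach to roughly
             +/- 1 MiB.  */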
20024 case BFD_RELOC_THUMB_PCREL_BLX
:
20026 /* If there is a blx from a thumb state function to
20027 another thumb function flip this to a bl and warn
20031 && S_IS_DEFINED (fixP
->fx_addsy
)
20032 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20033 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20034 && THUMB_IS_FUNC (fixP
->fx_addsy
))
20036 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
20037 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
20038 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
20040 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20041 newval
= newval
| 0x1000;
20042 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20043 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20048 goto thumb_bl_common
;
20050 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20052 /* A bl from Thumb state ISA to an internal ARM state function
20053 is converted to a blx. */
20055 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
20056 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
20057 && S_IS_DEFINED (fixP
->fx_addsy
)
20058 && ARM_IS_FUNC (fixP
->fx_addsy
)
20059 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
20061 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20062 newval
= newval
& ~0x1000;
20063 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
20064 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
20071 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
&&
20072 fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20073 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20076 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
20077 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20078 _("branch out of range"));
20080 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
20081 /* For a BLX instruction, make sure that the relocation is rounded up
20082 to a word boundary. This follows the semantics of the instruction
20083 which specifies that bit 1 of the target address will come from bit
20084 1 of the base address. */
20085 value
= (value
+ 1) & ~ 1;
20087 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20091 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20092 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20093 newval
|= (value
& 0x7fffff) >> 12;
20094 newval2
|= (value
& 0xfff) >> 1;
20095 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20096 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
20100 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20101 if ((value
& ~0x1ffffff) && ((value
& ~0x1ffffff) != ~0x1ffffff))
20102 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20103 _("branch out of range"));
20105 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20108 addressT S
, I1
, I2
, lo
, hi
;
20110 S
= (value
& 0x01000000) >> 24;
20111 I1
= (value
& 0x00800000) >> 23;
20112 I2
= (value
& 0x00400000) >> 22;
20113 hi
= (value
& 0x003ff000) >> 12;
20114 lo
= (value
& 0x00000ffe) >> 1;
20119 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20120 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
20121 newval
|= (S
<< 10) | hi
;
20122 newval2
|= (I1
<< 13) | (I2
<< 11) | lo
;
20123 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20124 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
20129 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20130 md_number_to_chars (buf
, value
, 1);
20134 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20135 md_number_to_chars (buf
, value
, 2);
20139 case BFD_RELOC_ARM_TLS_GD32
:
20140 case BFD_RELOC_ARM_TLS_LE32
:
20141 case BFD_RELOC_ARM_TLS_IE32
:
20142 case BFD_RELOC_ARM_TLS_LDM32
:
20143 case BFD_RELOC_ARM_TLS_LDO32
:
20144 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
20147 case BFD_RELOC_ARM_GOT32
:
20148 case BFD_RELOC_ARM_GOTOFF
:
20149 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20150 md_number_to_chars (buf
, 0, 4);
20153 case BFD_RELOC_ARM_TARGET2
:
20154 /* TARGET2 is not partial-inplace, so we need to write the
20155 addend here for REL targets, because it won't be written out
20156 during reloc processing later. */
20157 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20158 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
20162 case BFD_RELOC_RVA
:
20164 case BFD_RELOC_ARM_TARGET1
:
20165 case BFD_RELOC_ARM_ROSEGREL32
:
20166 case BFD_RELOC_ARM_SBREL32
:
20167 case BFD_RELOC_32_PCREL
:
20169 case BFD_RELOC_32_SECREL
:
20171 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20173 /* For WinCE we only do this for pcrel fixups. */
20174 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
20176 md_number_to_chars (buf
, value
, 4);
20180 case BFD_RELOC_ARM_PREL31
:
20181 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20183 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
20184 if ((value
^ (value
>> 1)) & 0x40000000)
20186 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20187 _("rel31 relocation overflow"));
20189 newval
|= value
& 0x7fffffff;
20190 md_number_to_chars (buf
, newval
, 4);
20195 case BFD_RELOC_ARM_CP_OFF_IMM
:
20196 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
20197 if (value
< -1023 || value
> 1023 || (value
& 3))
20198 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20199 _("co-processor offset out of range"));
20204 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20205 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20206 newval
= md_chars_to_number (buf
, INSN_SIZE
);
20208 newval
= get_thumb32_insn (buf
);
20209 newval
&= 0xff7fff00;
20210 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
20211 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
20212 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
20213 md_number_to_chars (buf
, newval
, INSN_SIZE
);
20215 put_thumb32_insn (buf
, newval
);
20218 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
20219 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
20220 if (value
< -255 || value
> 255)
20221 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20222 _("co-processor offset out of range"));
20224 goto cp_off_common
;
20226 case BFD_RELOC_ARM_THUMB_OFFSET
:
20227 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20228 /* Exactly what ranges, and where the offset is inserted depends
20229 on the type of instruction, we can establish this from the
20231 switch (newval
>> 12)
20233 case 4: /* PC load. */
20234 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
20235 forced to zero for these loads; md_pcrel_from has already
20236 compensated for this. */
20238 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20239 _("invalid offset, target not word aligned (0x%08lX)"),
20240 (((unsigned long) fixP
->fx_frag
->fr_address
20241 + (unsigned long) fixP
->fx_where
) & ~3)
20242 + (unsigned long) value
);
20244 if (value
& ~0x3fc)
20245 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20246 _("invalid offset, value too big (0x%08lX)"),
20249 newval
|= value
>> 2;
20252 case 9: /* SP load/store. */
20253 if (value
& ~0x3fc)
20254 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20255 _("invalid offset, value too big (0x%08lX)"),
20257 newval
|= value
>> 2;
20260 case 6: /* Word load/store. */
20262 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20263 _("invalid offset, value too big (0x%08lX)"),
20265 newval
|= value
<< 4; /* 6 - 2. */
20268 case 7: /* Byte load/store. */
20270 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20271 _("invalid offset, value too big (0x%08lX)"),
20273 newval
|= value
<< 6;
20276 case 8: /* Halfword load/store. */
20278 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20279 _("invalid offset, value too big (0x%08lX)"),
20281 newval
|= value
<< 5; /* 6 - 1. */
20285 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20286 "Unable to process relocation for thumb opcode: %lx",
20287 (unsigned long) newval
);
20290 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20293 case BFD_RELOC_ARM_THUMB_ADD
:
20294 /* This is a complicated relocation, since we use it for all of
20295 the following immediate relocations:
20299 9bit ADD/SUB SP word-aligned
20300 10bit ADD PC/SP word-aligned
20302 The type of instruction being processed is encoded in the
20309 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20311 int rd
= (newval
>> 4) & 0xf;
20312 int rs
= newval
& 0xf;
20313 int subtract
= !!(newval
& 0x8000);
20315 /* Check for HI regs, only very restricted cases allowed:
20316 Adjusting SP, and using PC or SP to get an address. */
20317 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
20318 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
20319 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20320 _("invalid Hi register with immediate"));
20322 /* If value is negative, choose the opposite instruction. */
20326 subtract
= !subtract
;
20328 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20329 _("immediate value out of range"));
20334 if (value
& ~0x1fc)
20335 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20336 _("invalid immediate for stack address calculation"));
20337 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
20338 newval
|= value
>> 2;
20340 else if (rs
== REG_PC
|| rs
== REG_SP
)
20342 if (subtract
|| value
& ~0x3fc)
20343 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20344 _("invalid immediate for address calculation (value = 0x%08lX)"),
20345 (unsigned long) value
);
20346 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
20348 newval
|= value
>> 2;
20353 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20354 _("immediate value out of range"));
20355 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
20356 newval
|= (rd
<< 8) | value
;
20361 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20362 _("immediate value out of range"));
20363 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
20364 newval
|= rd
| (rs
<< 3) | (value
<< 6);
20367 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20370 case BFD_RELOC_ARM_THUMB_IMM
:
20371 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
20372 if (value
< 0 || value
> 255)
20373 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20374 _("invalid immediate: %ld is out of range"),
20377 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20380 case BFD_RELOC_ARM_THUMB_SHIFT
:
20381 /* 5bit shift value (0..32). LSL cannot take 32. */
20382 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
20383 temp
= newval
& 0xf800;
20384 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
20385 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20386 _("invalid shift value: %ld"), (long) value
);
20387 /* Shifts of zero must be encoded as LSL. */
20389 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
20390 /* Shifts of 32 are encoded as zero. */
20391 else if (value
== 32)
20393 newval
|= value
<< 6;
20394 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
20397 case BFD_RELOC_VTABLE_INHERIT
:
20398 case BFD_RELOC_VTABLE_ENTRY
:
20402 case BFD_RELOC_ARM_MOVW
:
20403 case BFD_RELOC_ARM_MOVT
:
20404 case BFD_RELOC_ARM_THUMB_MOVW
:
20405 case BFD_RELOC_ARM_THUMB_MOVT
:
20406 if (fixP
->fx_done
|| !seg
->use_rela_p
)
20408 /* REL format relocations are limited to a 16-bit addend. */
20409 if (!fixP
->fx_done
)
20411 if (value
< -0x8000 || value
> 0x7fff)
20412 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20413 _("offset out of range"));
20415 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
20416 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20421 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
20422 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
20424 newval
= get_thumb32_insn (buf
);
20425 newval
&= 0xfbf08f00;
20426 newval
|= (value
& 0xf000) << 4;
20427 newval
|= (value
& 0x0800) << 15;
20428 newval
|= (value
& 0x0700) << 4;
20429 newval
|= (value
& 0x00ff);
20430 put_thumb32_insn (buf
, newval
);
20434 newval
= md_chars_to_number (buf
, 4);
20435 newval
&= 0xfff0f000;
20436 newval
|= value
& 0x0fff;
20437 newval
|= (value
& 0xf000) << 4;
20438 md_number_to_chars (buf
, newval
, 4);
20443 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
20444 case BFD_RELOC_ARM_ALU_PC_G0
:
20445 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
20446 case BFD_RELOC_ARM_ALU_PC_G1
:
20447 case BFD_RELOC_ARM_ALU_PC_G2
:
20448 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
20449 case BFD_RELOC_ARM_ALU_SB_G0
:
20450 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
20451 case BFD_RELOC_ARM_ALU_SB_G1
:
20452 case BFD_RELOC_ARM_ALU_SB_G2
:
20453 gas_assert (!fixP
->fx_done
);
20454 if (!seg
->use_rela_p
)
20457 bfd_vma encoded_addend
;
20458 bfd_vma addend_abs
= abs (value
);
20460 /* Check that the absolute value of the addend can be
20461 expressed as an 8-bit constant plus a rotation. */
20462 encoded_addend
= encode_arm_immediate (addend_abs
);
20463 if (encoded_addend
== (unsigned int) FAIL
)
20464 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20465 _("the offset 0x%08lX is not representable"),
20466 (unsigned long) addend_abs
);
20468 /* Extract the instruction. */
20469 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20471 /* If the addend is positive, use an ADD instruction.
20472 Otherwise use a SUB. Take care not to destroy the S bit. */
20473 insn
&= 0xff1fffff;
20479 /* Place the encoded addend into the first 12 bits of the
20481 insn
&= 0xfffff000;
20482 insn
|= encoded_addend
;
20484 /* Update the instruction. */
20485 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20489 case BFD_RELOC_ARM_LDR_PC_G0
:
20490 case BFD_RELOC_ARM_LDR_PC_G1
:
20491 case BFD_RELOC_ARM_LDR_PC_G2
:
20492 case BFD_RELOC_ARM_LDR_SB_G0
:
20493 case BFD_RELOC_ARM_LDR_SB_G1
:
20494 case BFD_RELOC_ARM_LDR_SB_G2
:
20495 gas_assert (!fixP
->fx_done
);
20496 if (!seg
->use_rela_p
)
20499 bfd_vma addend_abs
= abs (value
);
20501 /* Check that the absolute value of the addend can be
20502 encoded in 12 bits. */
20503 if (addend_abs
>= 0x1000)
20504 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20505 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
20506 (unsigned long) addend_abs
);
20508 /* Extract the instruction. */
20509 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20511 /* If the addend is negative, clear bit 23 of the instruction.
20512 Otherwise set it. */
20514 insn
&= ~(1 << 23);
20518 /* Place the absolute value of the addend into the first 12 bits
20519 of the instruction. */
20520 insn
&= 0xfffff000;
20521 insn
|= addend_abs
;
20523 /* Update the instruction. */
20524 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20528 case BFD_RELOC_ARM_LDRS_PC_G0
:
20529 case BFD_RELOC_ARM_LDRS_PC_G1
:
20530 case BFD_RELOC_ARM_LDRS_PC_G2
:
20531 case BFD_RELOC_ARM_LDRS_SB_G0
:
20532 case BFD_RELOC_ARM_LDRS_SB_G1
:
20533 case BFD_RELOC_ARM_LDRS_SB_G2
:
20534 gas_assert (!fixP
->fx_done
);
20535 if (!seg
->use_rela_p
)
20538 bfd_vma addend_abs
= abs (value
);
20540 /* Check that the absolute value of the addend can be
20541 encoded in 8 bits. */
20542 if (addend_abs
>= 0x100)
20543 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20544 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
20545 (unsigned long) addend_abs
);
20547 /* Extract the instruction. */
20548 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20550 /* If the addend is negative, clear bit 23 of the instruction.
20551 Otherwise set it. */
20553 insn
&= ~(1 << 23);
20557 /* Place the first four bits of the absolute value of the addend
20558 into the first 4 bits of the instruction, and the remaining
20559 four into bits 8 .. 11. */
20560 insn
&= 0xfffff0f0;
20561 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
20563 /* Update the instruction. */
20564 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20568 case BFD_RELOC_ARM_LDC_PC_G0
:
20569 case BFD_RELOC_ARM_LDC_PC_G1
:
20570 case BFD_RELOC_ARM_LDC_PC_G2
:
20571 case BFD_RELOC_ARM_LDC_SB_G0
:
20572 case BFD_RELOC_ARM_LDC_SB_G1
:
20573 case BFD_RELOC_ARM_LDC_SB_G2
:
20574 gas_assert (!fixP
->fx_done
);
20575 if (!seg
->use_rela_p
)
20578 bfd_vma addend_abs
= abs (value
);
20580 /* Check that the absolute value of the addend is a multiple of
20581 four and, when divided by four, fits in 8 bits. */
20582 if (addend_abs
& 0x3)
20583 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20584 _("bad offset 0x%08lX (must be word-aligned)"),
20585 (unsigned long) addend_abs
);
20587 if ((addend_abs
>> 2) > 0xff)
20588 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20589 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
20590 (unsigned long) addend_abs
);
20592 /* Extract the instruction. */
20593 insn
= md_chars_to_number (buf
, INSN_SIZE
);
20595 /* If the addend is negative, clear bit 23 of the instruction.
20596 Otherwise set it. */
20598 insn
&= ~(1 << 23);
20602 /* Place the addend (divided by four) into the first eight
20603 bits of the instruction. */
20604 insn
&= 0xfffffff0;
20605 insn
|= addend_abs
>> 2;
20607 /* Update the instruction. */
20608 md_number_to_chars (buf
, insn
, INSN_SIZE
);
20612 case BFD_RELOC_ARM_V4BX
:
20613 /* This will need to go in the object file. */
20617 case BFD_RELOC_UNUSED
:
20619 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
20620 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
20624 /* Translate internal representation of relocation info to BFD target
20628 tc_gen_reloc (asection
*section
, fixS
*fixp
)
20631 bfd_reloc_code_real_type code
;
20633 reloc
= xmalloc (sizeof (arelent
));
20635 reloc
->sym_ptr_ptr
= xmalloc (sizeof (asymbol
*));
20636 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
20637 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
20639 if (fixp
->fx_pcrel
)
20641 if (section
->use_rela_p
)
20642 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
20644 fixp
->fx_offset
= reloc
->address
;
20646 reloc
->addend
= fixp
->fx_offset
;
20648 switch (fixp
->fx_r_type
)
20651 if (fixp
->fx_pcrel
)
20653 code
= BFD_RELOC_8_PCREL
;
20658 if (fixp
->fx_pcrel
)
20660 code
= BFD_RELOC_16_PCREL
;
20665 if (fixp
->fx_pcrel
)
20667 code
= BFD_RELOC_32_PCREL
;
20671 case BFD_RELOC_ARM_MOVW
:
20672 if (fixp
->fx_pcrel
)
20674 code
= BFD_RELOC_ARM_MOVW_PCREL
;
20678 case BFD_RELOC_ARM_MOVT
:
20679 if (fixp
->fx_pcrel
)
20681 code
= BFD_RELOC_ARM_MOVT_PCREL
;
20685 case BFD_RELOC_ARM_THUMB_MOVW
:
20686 if (fixp
->fx_pcrel
)
20688 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
20692 case BFD_RELOC_ARM_THUMB_MOVT
:
20693 if (fixp
->fx_pcrel
)
20695 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
20699 case BFD_RELOC_NONE
:
20700 case BFD_RELOC_ARM_PCREL_BRANCH
:
20701 case BFD_RELOC_ARM_PCREL_BLX
:
20702 case BFD_RELOC_RVA
:
20703 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
20704 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
20705 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
20706 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
20707 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
20708 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
20709 case BFD_RELOC_VTABLE_ENTRY
:
20710 case BFD_RELOC_VTABLE_INHERIT
:
20712 case BFD_RELOC_32_SECREL
:
20714 code
= fixp
->fx_r_type
;
20717 case BFD_RELOC_THUMB_PCREL_BLX
:
20719 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
20720 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
20723 code
= BFD_RELOC_THUMB_PCREL_BLX
;
20726 case BFD_RELOC_ARM_LITERAL
:
20727 case BFD_RELOC_ARM_HWLITERAL
:
      /* If this is called then a literal has
         been referenced across a section boundary.  */
20730 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20731 _("literal referenced across section boundary"));
20735 case BFD_RELOC_ARM_GOT32
:
20736 case BFD_RELOC_ARM_GOTOFF
:
20737 case BFD_RELOC_ARM_PLT32
:
20738 case BFD_RELOC_ARM_TARGET1
:
20739 case BFD_RELOC_ARM_ROSEGREL32
:
20740 case BFD_RELOC_ARM_SBREL32
:
20741 case BFD_RELOC_ARM_PREL31
:
20742 case BFD_RELOC_ARM_TARGET2
:
20743 case BFD_RELOC_ARM_TLS_LE32
:
20744 case BFD_RELOC_ARM_TLS_LDO32
:
20745 case BFD_RELOC_ARM_PCREL_CALL
:
20746 case BFD_RELOC_ARM_PCREL_JUMP
:
20747 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
20748 case BFD_RELOC_ARM_ALU_PC_G0
:
20749 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
20750 case BFD_RELOC_ARM_ALU_PC_G1
:
20751 case BFD_RELOC_ARM_ALU_PC_G2
:
20752 case BFD_RELOC_ARM_LDR_PC_G0
:
20753 case BFD_RELOC_ARM_LDR_PC_G1
:
20754 case BFD_RELOC_ARM_LDR_PC_G2
:
20755 case BFD_RELOC_ARM_LDRS_PC_G0
:
20756 case BFD_RELOC_ARM_LDRS_PC_G1
:
20757 case BFD_RELOC_ARM_LDRS_PC_G2
:
20758 case BFD_RELOC_ARM_LDC_PC_G0
:
20759 case BFD_RELOC_ARM_LDC_PC_G1
:
20760 case BFD_RELOC_ARM_LDC_PC_G2
:
20761 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
20762 case BFD_RELOC_ARM_ALU_SB_G0
:
20763 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
20764 case BFD_RELOC_ARM_ALU_SB_G1
:
20765 case BFD_RELOC_ARM_ALU_SB_G2
:
20766 case BFD_RELOC_ARM_LDR_SB_G0
:
20767 case BFD_RELOC_ARM_LDR_SB_G1
:
20768 case BFD_RELOC_ARM_LDR_SB_G2
:
20769 case BFD_RELOC_ARM_LDRS_SB_G0
:
20770 case BFD_RELOC_ARM_LDRS_SB_G1
:
20771 case BFD_RELOC_ARM_LDRS_SB_G2
:
20772 case BFD_RELOC_ARM_LDC_SB_G0
:
20773 case BFD_RELOC_ARM_LDC_SB_G1
:
20774 case BFD_RELOC_ARM_LDC_SB_G2
:
20775 case BFD_RELOC_ARM_V4BX
:
20776 code
= fixp
->fx_r_type
;
20779 case BFD_RELOC_ARM_TLS_GD32
:
20780 case BFD_RELOC_ARM_TLS_IE32
:
20781 case BFD_RELOC_ARM_TLS_LDM32
:
20782 /* BFD will include the symbol's address in the addend.
20783 But we don't want that, so subtract it out again here. */
20784 if (!S_IS_COMMON (fixp
->fx_addsy
))
20785 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
20786 code
= fixp
->fx_r_type
;
20790 case BFD_RELOC_ARM_IMMEDIATE
:
20791 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20792 _("internal relocation (type: IMMEDIATE) not fixed up"));
20795 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
20796 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20797 _("ADRL used for a symbol not defined in the same file"));
20800 case BFD_RELOC_ARM_OFFSET_IMM
:
20801 if (section
->use_rela_p
)
20803 code
= fixp
->fx_r_type
;
20807 if (fixp
->fx_addsy
!= NULL
20808 && !S_IS_DEFINED (fixp
->fx_addsy
)
20809 && S_IS_LOCAL (fixp
->fx_addsy
))
20811 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20812 _("undefined local label `%s'"),
20813 S_GET_NAME (fixp
->fx_addsy
));
20817 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20818 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
20825 switch (fixp
->fx_r_type
)
20827 case BFD_RELOC_NONE
: type
= "NONE"; break;
20828 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
20829 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
20830 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
20831 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
20832 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
20833 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
20834 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
20835 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
20836 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
20837 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
20838 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
20839 default: type
= _("<unknown>"); break;
20841 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20842 _("cannot represent %s relocation in this object file format"),
20849 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
20851 && fixp
->fx_addsy
== GOT_symbol
)
20853 code
= BFD_RELOC_ARM_GOTPC
;
20854 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
20858 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
20860 if (reloc
->howto
== NULL
)
20862 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
20863 _("cannot represent %s relocation in this object file format"),
20864 bfd_get_reloc_code_name (code
));
20868 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
20869 vtable entry to be used in the relocation's section offset. */
20870 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
20871 reloc
->address
= fixp
->fx_offset
;
20876 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
20879 cons_fix_new_arm (fragS
* frag
,
20884 bfd_reloc_code_real_type type
;
20888 FIXME: @@ Should look at CPU word size. */
20892 type
= BFD_RELOC_8
;
20895 type
= BFD_RELOC_16
;
20899 type
= BFD_RELOC_32
;
20902 type
= BFD_RELOC_64
;
20907 if (exp
->X_op
== O_secrel
)
20909 exp
->X_op
= O_symbol
;
20910 type
= BFD_RELOC_32_SECREL
;
20914 fix_new_exp (frag
, where
, (int) size
, exp
, pcrel
, type
);
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation.  These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (THUMB_IS_FUNC (fixp->fx_addsy))
        return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (ARM_IS_FUNC (fixp->fx_addsy))
        return 1;
      break;

    default:
      break;
    }

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
20997 #if defined (OBJ_ELF) || defined (OBJ_COFF)
20998 /* Relocations against function names must be left unadjusted,
20999 so that the linker can use this information to generate interworking
21000 stubs. The MIPS version of this function
21001 also prevents relocations that are mips-16 specific, but I do not
21002 know why it does this.
21005 There is one other problem that ought to be addressed here, but
21006 which currently is not: Taking the address of a label (rather
21007 than a function) and then later jumping to that address. Such
21008 addresses also ought to have their bottom bit set (assuming that
21009 they reside in Thumb code), but at the moment they will not. */
21012 arm_fix_adjustable (fixS
* fixP
)
21014 if (fixP
->fx_addsy
== NULL
)
21017 /* Preserve relocations against symbols with function type. */
21018 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
21021 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
21022 && fixP
->fx_subsy
== NULL
)
21025 /* We need the symbol name for the VTABLE entries. */
21026 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
21027 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
21030 /* Don't allow symbols to be discarded on GOT related relocs. */
21031 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
21032 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
21033 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
21034 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
21035 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
21036 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
21037 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
21038 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
21039 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
21042 /* Similarly for group relocations. */
21043 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
21044 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
21045 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
21048 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
21049 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
21050 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
21051 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
21052 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
21053 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
21054 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
21055 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
21056 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
21061 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
          ? "elf32-bigarm-symbian"
          : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
          ? "elf32-bigarm-vxworks"
          : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}

void
armelf_frob_symbol (symbolS * symp,
                    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}

/* MD interface: Finalization.  */
21097 literal_pool
* pool
;
21099 /* Ensure that all the IT blocks are properly closed. */
21100 check_it_blocks_finished ();
21102 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
21104 /* Put it at the end of the relevant section. */
21105 subseg_set (pool
->section
, pool
->sub_section
);
21107 arm_elf_change_section ();
21114 /* Remove any excess mapping symbols generated for alignment frags in
21115 SEC. We may have created a mapping symbol before a zero byte
21116 alignment; remove it if there's a mapping symbol after the
21119 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
21120 void *dummy ATTRIBUTE_UNUSED
)
21122 segment_info_type
*seginfo
= seg_info (sec
);
21125 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
21128 for (fragp
= seginfo
->frchainP
->frch_root
;
21130 fragp
= fragp
->fr_next
)
21132 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
21133 fragS
*next
= fragp
->fr_next
;
21135 /* Variable-sized frags have been converted to fixed size by
21136 this point. But if this was variable-sized to start with,
21137 there will be a fixed-size frag after it. So don't handle
21139 if (sym
== NULL
|| next
== NULL
)
21142 if (S_GET_VALUE (sym
) < next
->fr_address
)
21143 /* Not at the end of this frag. */
21145 know (S_GET_VALUE (sym
) == next
->fr_address
);
21149 if (next
->tc_frag_data
.first_map
!= NULL
)
21151 /* Next frag starts with a mapping symbol. Discard this
21153 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21157 if (next
->fr_next
== NULL
)
21159 /* This mapping symbol is at the end of the section. Discard
21161 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
21162 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
21166 /* As long as we have empty frags without any mapping symbols,
21168 /* If the next frag is non-empty and does not start with a
21169 mapping symbol, then this mapping symbol is required. */
21170 if (next
->fr_address
!= next
->fr_next
->fr_address
)
21173 next
= next
->fr_next
;
21175 while (next
!= NULL
);
21180 /* Adjust the symbol table. This marks Thumb symbols as distinct from
21184 arm_adjust_symtab (void)
21189 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
21191 if (ARM_IS_THUMB (sym
))
21193 if (THUMB_IS_FUNC (sym
))
21195 /* Mark the symbol as a Thumb function. */
21196 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
21197 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
21198 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
21200 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
21201 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
21203 as_bad (_("%s: unexpected function type: %d"),
21204 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
21206 else switch (S_GET_STORAGE_CLASS (sym
))
21209 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
21212 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
21215 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
21223 if (ARM_IS_INTERWORK (sym
))
21224 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
21231 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
21233 if (ARM_IS_THUMB (sym
))
21235 elf_symbol_type
* elf_sym
;
21237 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
21238 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
21240 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
21241 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
21243 /* If it's a .thumb_func, declare it as so,
21244 otherwise tag label as .code 16. */
21245 if (THUMB_IS_FUNC (sym
))
21246 elf_sym
->internal_elf_sym
.st_info
=
21247 ELF_ST_INFO (bind
, STT_ARM_TFUNC
);
21248 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
21249 elf_sym
->internal_elf_sym
.st_info
=
21250 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
21255 /* Remove any overlapping mapping symbols generated by alignment frags. */
21256 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
/* MD interface: Initialization.  */

static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
))
21279 opcode_select (16);
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if (	 (arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
      || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	mfpu_opt = mcpu_fpu_opt;
      else
	mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
	mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
	mfpu_opt = &fpu_arch_vfp_v2;
      else
	mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).  */
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	  flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.  */
	break;

      default:
	abort ();
      }
#endif

    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags
	      (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
	    bfd_set_section_size (stdoutput, sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
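
/* Worked example (illustration only, based on the feature tests above):
   assembling with -mcpu=arm920t merges ARM_ARCH_V4T into cpu_variant, so the
   chain above falls through to the arm_ext_v4/arm_ext_v4t pair and records
   bfd_mach_arm_4T in the output BFD.  */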
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

	      -m[no-]warn-deprecated	 Warn about deprecated features

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code
	      -matpcs
      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for backwards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1		      Currently not supported.
	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
	      -m[arm]3		      Arm 3 processor
	      -m[arm]6[xx],	      Arm 6 processors
	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
	      -m[arm]8[10]	      Arm 8 processors
	      -m[arm]9[20][tdmi]      Arm 9 processors
	      -mstrongarm[110[0]]     StrongARM processors
	      -mxscale		      XScale processors
	      -m[arm]v[2345[t[e]]]    Arm architectures
	      -mall		      All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	      -mfpe-old		      (No float load/store multiples)
	      -mvfpxd		      VFP Single precision
	      -mvfp		      All VFP
	      -mno-fpu		      Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
      */
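
/* Example invocations (illustrative only, using the option spellings
   documented above and CPU/FPU names from the tables below):

       as -mcpu=arm7tdmi -mthumb-interwork -o foo.o foo.s
       as -march=armv7-a -mfpu=neon -mfloat-abi=softfp -o bar.o bar.s

   Legacy spellings such as -marm7tdmi are still accepted, but are reported
   as deprecated in favour of the -mcpu=/-march= forms.  */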
const char * md_shortopts = "m:k";

#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int	value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
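
/* Example (illustrative only): "-mthumb" is matched against this table by
   md_parse_option below; the matching entry stores its value field through
   *var (here &thumb_mode), so assembly starts out in Thumb state.  */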
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.  */
  const arm_feature_set	value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	    N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old",	 &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",	 &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",	 &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
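
/* Example (illustrative only): "-m7tdmi" resolves through this table to
   legacy_cpu = ARM_ARCH_V4T and, because the entry carries a deprecation
   string, md_parse_option prints a message along the lines of
   "option `-m7tdmi' is deprecated: use -mcpu=arm7tdmi".  */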
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set	value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set	default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA,	  NULL},
  {"arm1",		ARM_ARCH_V1,	 FPU_ARCH_FPA,	  NULL},
  {"arm2",		ARM_ARCH_V2,	 FPU_ARCH_FPA,	  NULL},
  {"arm250",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm3",		ARM_ARCH_V2S,	 FPU_ARCH_FPA,	  NULL},
  {"arm6",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm60",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm600",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm610",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm620",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7d",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dm",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm7di",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7dmi",		ARM_ARCH_V3M,	 FPU_ARCH_FPA,	  NULL},
  {"arm70",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm700i",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm710t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm720",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm720t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm740t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm710c",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7100",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7500fe",		ARM_ARCH_V3,	 FPU_ARCH_FPA,	  NULL},
  {"arm7t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm7tdmi-s",	ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm8",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm810",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1100",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"strongarm1110",	ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"arm9",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm920",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  "ARM920T"},
  {"arm920t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm922t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm940t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"arm9tdmi",		ARM_ARCH_V4T,	 FPU_ARCH_FPA,	  NULL},
  {"fa526",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  {"fa626",		ARM_ARCH_V4,	 FPU_ARCH_FPA,	  NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.	 */
  {"arm9e-r0",		ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm926ej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0",	ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm10t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm10e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1020",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP_V1, NULL},
  {"arm1020e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1022e",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s",	ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP_V2, NULL},
  {"fa626te",		ARM_ARCH_V5TE,	 FPU_NONE,	  NULL},
  {"fa726te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1136js",		ARM_ARCH_V6,	 FPU_NONE,	  "ARM1136J-S"},
  {"arm1136j-s",	ARM_ARCH_V6,	 FPU_NONE,	  NULL},
  {"arm1136jfs",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s",	ARM_ARCH_V6,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcore",		ARM_ARCH_V6K,	 FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp",	ARM_ARCH_V6K,	 FPU_NONE,	  NULL},
  {"arm1156t2-s",	ARM_ARCH_V6T2,	 FPU_NONE,	  NULL},
  {"arm1156t2f-s",	ARM_ARCH_V6T2,	 FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s",	ARM_ARCH_V6ZK,	 FPU_NONE,	  NULL},
  {"arm1176jzf-s",	ARM_ARCH_V6ZK,	 FPU_ARCH_VFP_V2, NULL},
  {"cortex-a8",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-a9",		ARM_ARCH_V7A,	 ARM_FEATURE (0, FPU_VFP_V3
						      | FPU_NEON_EXT_V1),
							  NULL},
  {"cortex-r4",		ARM_ARCH_V7R,	 FPU_NONE,	  NULL},
  {"cortex-r4f",	ARM_ARCH_V7R,	 FPU_ARCH_VFP_V3D16, NULL},
  {"cortex-m3",		ARM_ARCH_V7M,	 FPU_NONE,	  NULL},
  {"cortex-m1",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  {"cortex-m0",		ARM_ARCH_V6M,	 FPU_NONE,	  NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2, FPU_ARCH_VFP_V2, NULL},
  {"i80200",		ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  {"ep9312",	ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE,	  NULL}
};
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set	value;
  const arm_feature_set	default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all",		ARM_ANY,	 FPU_ARCH_FPA},
  {"armv1",		ARM_ARCH_V1,	 FPU_ARCH_FPA},
  {"armv2",		ARM_ARCH_V2,	 FPU_ARCH_FPA},
  {"armv2a",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv2s",		ARM_ARCH_V2S,	 FPU_ARCH_FPA},
  {"armv3",		ARM_ARCH_V3,	 FPU_ARCH_FPA},
  {"armv3m",		ARM_ARCH_V3M,	 FPU_ARCH_FPA},
  {"armv4",		ARM_ARCH_V4,	 FPU_ARCH_FPA},
  {"armv4xm",		ARM_ARCH_V4xM,	 FPU_ARCH_FPA},
  {"armv4t",		ARM_ARCH_V4T,	 FPU_ARCH_FPA},
  {"armv4txm",		ARM_ARCH_V4TxM,	 FPU_ARCH_FPA},
  {"armv5",		ARM_ARCH_V5,	 FPU_ARCH_VFP},
  {"armv5t",		ARM_ARCH_V5T,	 FPU_ARCH_VFP},
  {"armv5txm",		ARM_ARCH_V5TxM,	 FPU_ARCH_VFP},
  {"armv5te",		ARM_ARCH_V5TE,	 FPU_ARCH_VFP},
  {"armv5texp",		ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej",		ARM_ARCH_V5TEJ,	 FPU_ARCH_VFP},
  {"armv6",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6j",		ARM_ARCH_V6,	 FPU_ARCH_VFP},
  {"armv6k",		ARM_ARCH_V6K,	 FPU_ARCH_VFP},
  {"armv6z",		ARM_ARCH_V6Z,	 FPU_ARCH_VFP},
  {"armv6zk",		ARM_ARCH_V6ZK,	 FPU_ARCH_VFP},
  {"armv6t2",		ARM_ARCH_V6T2,	 FPU_ARCH_VFP},
  {"armv6kt2",		ARM_ARCH_V6KT2,	 FPU_ARCH_VFP},
  {"armv6zt2",		ARM_ARCH_V6ZT2,	 FPU_ARCH_VFP},
  {"armv6zkt2",		ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m",		ARM_ARCH_V6M,	 FPU_ARCH_VFP},
  {"armv7",		ARM_ARCH_V7,	 FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"armv7-a",		ARM_ARCH_V7A,	 FPU_ARCH_VFP},
  {"armv7-r",		ARM_ARCH_V7R,	 FPU_ARCH_VFP},
  {"armv7-m",		ARM_ARCH_V7M,	 FPU_ARCH_VFP},
  {"xscale",		ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt",		ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2",		ARM_ARCH_IWMMXT2, FPU_ARCH_VFP},
  {NULL,		ARM_ARCH_NONE,	 ARM_ARCH_NONE}
};
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick",	ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale",	ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt",	ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2",	ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL,	ARM_ARCH_NONE}
};
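
/* Example (illustrative only): a command line such as "-mcpu=xscale+iwmmxt"
   is split on '+' by arm_parse_extension below, and the "iwmmxt" entry above
   merges ARM_CEXT_IWMMXT into the selected feature set.  */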
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* For backwards compatibility.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2},
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {NULL,		ARM_ARCH_NONE}
};
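
/* Example (illustrative only): "-mfpu=vfpv3-d16" selects FPU_ARCH_VFP_V3D16
   from this table; the same names are accepted by the ".fpu" directive
   handled in s_arm_fpu below.  */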
struct arm_option_value_table
{
  char *name;
  long value;
};

static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
#endif

struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
static int
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return 0;
	}

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
	optlen = ext - str;
      else
	optlen = strlen (str);

      if (optlen == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return 0;
	}

      for (opt = arm_extensions; opt->name != NULL; opt++)
	if (strncmp (opt->name, str, optlen) == 0)
	  {
	    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
	    break;
	  }

      if (opt->name == NULL)
	{
	  as_bad (_("unknown architectural extension `%s'"), str);
	  return 0;
	}

      str = ext;
    }

  return 1;
}
static int
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return 0;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; i < optlen; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, &mcpu_cpu_opt);

	return 1;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return 0;
}
static int
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return 0;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	march_cpu_opt = &opt->value;
	march_fpu_opt = &opt->default_fpu;
	strcpy (selected_cpu_name, opt->name);

	if (ext != NULL)
	  return arm_parse_extension (ext, &march_cpu_opt);

	return 1;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return 0;
}
static int
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfpu_opt = &opt->value;
	return 1;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return 0;
}
static int
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	mfloat_abi_opt = opt->value;
	return 1;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return 0;
}
#ifdef OBJ_ELF
static int
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
	meabi_flags = opt->value;
	return 1;
      }
  as_bad (_("unknown EABI `%s'\n"), str);
  return 0;
}
#endif
static int
arm_parse_it_mode (char * str)
{
  bfd_boolean ret = TRUE;

  if (streq ("arm", str))
    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
  else if (streq ("thumb", str))
    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
  else if (streq ("always", str))
    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
  else if (streq ("never", str))
    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
  else
    {
      as_bad (_("unknown implicit IT mode `%s', should be "\
		"arm, thumb, always, or never."), str);
      ret = FALSE;
    }

  return ret;
}
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {NULL, NULL, 0, NULL}
};
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.  */
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
#ifdef OBJ_ELF
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {1, ARM_ARCH_V4},
    {2, ARM_ARCH_V4T},
    {3, ARM_ARCH_V5},
    {3, ARM_ARCH_V5T},
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {6, ARM_ARCH_V6},
    {7, ARM_ARCH_V6Z},
    {9, ARM_ARCH_V6K},
    {11, ARM_ARCH_V6M},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
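
/* Example (illustrative only): a v7-A feature set matches the
   {10, ARM_ARCH_V7A} row when aeabi_set_public_attributes scans this table,
   so Tag_CPU_arch is emitted as 10 (ARM v7) in .ARM.attributes.  */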
/* Set an attribute if it has not already been set by the user.  */
static void
aeabi_set_attribute_int (int tag, int value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}

static void
aeabi_set_attribute_string (int tag, const char *value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
}
/* Set the public EABI object attributes.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
	{
	  arch = p->val;
	  ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
	}
    }

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *p;

      p = selected_cpu_name;
      if (strncmp (p, "armv", 4) == 0)
	{
	  int i;

	  p += 4;
	  for (i = 0; p[i]; i++)
	    p[i] = TOUPPER (p[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, p);
    }
  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);
  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
	ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);
  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);
  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}

/* Add the default contents for the .ARM.attributes section.  */
void
arm_md_end (void)
{
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
#endif /* OBJ_ELF */
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	if (opt->canonical_name)
	  strcpy (selected_cpu_name, opt->canonical_name);
	else
	  {
	    int i;

	    for (i = 0; opt->name[i]; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
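
/* Example (illustrative only): a source line of

	.cpu cortex-a8

   re-selects the "cortex-a8" entry from arm_cpus above in the middle of
   assembly and re-merges cpu_variant accordingly.  */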
/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mcpu_cpu_opt = &opt->value;
	selected_cpu = opt->value;
	strcpy (selected_cpu_name, opt->name);
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	object_arch = &opt->value;
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char saved_char;
  char *name;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	mfpu_opt = &opt->value;
	ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
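
/* Example (illustrative only): a source line of

	.fpu neon

   switches the active FPU feature set to FPU_ARCH_VFP_V3_PLUS_NEON_V1 from
   the arm_fpus table for the remainder of the file (or until the next
   .fpu directive).  */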
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
#ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int	 tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
	 also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_T2EE_use),
      T (Tag_Virtualization_use),
      T (Tag_MPextension_use)
#undef T
    };
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
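
/* Example (illustrative only): for a directive such as

	.eabi_attribute Tag_ABI_enum_size, 1

   the ELF backend calls arm_convert_symbolic_attribute ("Tag_ABI_enum_size")
   and gets back the corresponding numeric tag value.  */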
/* Apply sym value for relocations only in the case that
   they are for local symbols and you have the respective
   architectural feature for blx and simple switches.  */

int
arm_apply_sym_value (struct fix * fixP)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      && !S_IS_EXTERNAL (fixP->fx_addsy))
    {
      switch (fixP->fx_r_type)
	{
	case BFD_RELOC_ARM_PCREL_BLX:
	case BFD_RELOC_THUMB_PCREL_BRANCH23:
	  if (ARM_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	case BFD_RELOC_ARM_PCREL_CALL:
	case BFD_RELOC_THUMB_PCREL_BLX:
	  if (THUMB_IS_FUNC (fixP->fx_addsy))
	    return 1;
	  break;

	default:
	  break;
	}
    }

  return 0;
}
#endif /* OBJ_ELF */