/* tc-arm.c -- Assemble for the ARM
   Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
   2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
	Modified by David Taylor (dtaylor@armltd.co.uk)
	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)

   This file is part of GAS, the GNU Assembler.

   GAS is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GAS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GAS; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
   02110-1301, USA.  */
#include "safe-ctype.h"
#include "opcode/arm.h"
#include "dw2gencfi.h"
#include "dwarf2dbg.h"

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8
/* This structure holds the unwinding state.  */

  symbolS *       table_entry;
  symbolS *       personality_routine;
  int             personality_index;
  /* The segment containing the function.  */

  /* Opcodes generated from this function.  */
  unsigned char * opcodes;

  /* The number of bytes pushed to the stack.  */

  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT         pending_offset;

  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.  */

  /* Nonzero if an unwind_setfp directive has been seen.  */

  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned        sp_restored:1;
/* Results from operand parsing worker functions.  */

  PARSE_OPERAND_SUCCESS,
  PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
/* Types of processor to assemble for.  */

/* The code that was here used to select a default CPU depending on compiler
   pre-defines which were only present when doing native builds, thus
   changing gas' default behaviour depending upon the build host.

   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */

#  define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
    /* Legacy a.out format.  */
#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
   /* For backwards compatibility, default to FPA.  */
#  define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b)	      (strcmp (a, b) == 0)

static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V2, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_V7M, 0);

static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE (0, FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

static int meabi_flags = EABI_DEFAULT;
static int meabi_flags = EF_ARM_EABI_UNKNOWN;

static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];

  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
/* Pre-defined "_GLOBAL_OFFSET_TABLE_"  */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
/* Specifies the intrinsic IT insn behavior mode.  */
enum implicit_it_mode
  {
    IMPLICIT_IT_MODE_NEVER  = 0x00,
    IMPLICIT_IT_MODE_ARM    = 0x01,
    IMPLICIT_IT_MODE_THUMB  = 0x02,
    IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
  };
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
  enum neon_el_type type;

#define NEON_MAX_TYPE_ELS 4

  struct neon_type_el el[NEON_MAX_TYPE_ELS];

enum it_instruction_type

   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			      if inside, should be the last one.  */
   NEUTRAL_IT_INSN,	   /* This could be either inside or outside,
			      i.e. BKPT and NOP.  */
   IT_INSN		   /* The IT insn has been parsed.  */

  unsigned long instruction;

  /* "uncond_value" is set to the value in place of the conditional field in
     unconditional versions of the instruction, or -1 if nothing is
     appropriate.  */
  struct neon_type vectype;
  /* This does not indicate an actual NEON instruction, only that
     the mnemonic accepts neon-style type suffixes.  */
  /* Set to the opcode if the instruction needs relaxation.
     Zero if the instruction is not relaxed.  */
  bfd_reloc_code_real_type type;
  enum it_instruction_type it_insn_type;

    struct neon_type_el vectype;
    unsigned present	: 1;  /* Operand present.  */
    unsigned isreg	: 1;  /* Operand was a register.  */
    unsigned immisreg	: 1;  /* .imm field is a second register.  */
    unsigned isscalar	: 1;  /* Operand is a (Neon) scalar.  */
    unsigned immisalign	: 1;  /* Immediate is an alignment specifier.  */
    unsigned immisfloat	: 1;  /* Immediate was parsed as a float.  */
    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
       instructions.  This allows us to disambiguate ARM <-> vector insns.  */
    unsigned regisimm	: 1;  /* 64-bit immediate, reg forms high 32 bits.  */
    unsigned isvec	: 1;  /* Is a single, double or quad VFP/Neon reg.  */
    unsigned isquad	: 1;  /* Operand is Neon quad-precision register.  */
    unsigned issingle	: 1;  /* Operand is VFP single-precision register.  */
    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
    unsigned writeback	: 1;  /* Operand has trailing !  */
    unsigned preind	: 1;  /* Preindexed address.  */
    unsigned postind	: 1;  /* Postindexed address.  */
    unsigned negative	: 1;  /* Index register was negated.  */
    unsigned shifted	: 1;  /* Shift applied to operation.  */
    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

static struct arm_it inst;
#define NUM_FLOAT_VALS 8

const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

  const char *	 template_name;

#define COND_ALWAYS 0xE

  const char *	 template_name;

struct asm_barrier_opt
  const char *	 template_name;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)

  bfd_reloc_code_real_type reloc;

  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn

  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

struct neon_typed_alias
  unsigned char	       defined;
  struct neon_type_el  eltype;

/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
   information which states whether a vector type or index is specified (for a
   register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */

  unsigned char		      number;
  unsigned char		      builtin;
  struct neon_typed_alias *   neon;
/* Diagnostics used when we don't get a register of the expected type.  */
const char * const reg_expected_msgs[] =
{
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("VFP single or double precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP single, double or Neon quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
};

/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */
  /* Basic string to match.  */
  const char * template_name;

  /* Parameters to instruction.  */
  unsigned int operands[8];

  /* Conditional tag - see opcode_lookup.  */
  unsigned int tag : 4;

  /* Basic instruction code.  */
  unsigned int avalue : 28;

  /* Thumb-format instruction code.  */

  /* Which architecture variant provides this instruction.  */
  const arm_feature_set * avariant;
  const arm_feature_set * tvariant;

  /* Function to call to encode instruction in ARM format.  */
  void (* aencode) (void);

  /* Function to call to encode instruction in Thumb format.  */
  void (* tencode) (void);

/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")

static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity.  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
  expressionS	         literals[MAX_LITERAL_POOL_SIZE];
  unsigned int	         next_free_entry;
  struct literal_pool *  next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
#  define now_it seg_info (now_seg)->tc_segment_info_data.current_it

static struct current_it now_it;

now_it_compatible (int cond)
  return (cond & ~1) == (now_it.cc & ~1);

conditional_insn (void)
  return inst.cond != COND_ALWAYS;

static int in_it_block (void);
static int handle_it_state (void);
static void force_automatic_it_block_close (void);
static void it_fsm_post_encode (void);
#define set_it_insn_type(type)				\
      inst.it_insn_type = type;				\
      if (handle_it_state () == FAIL)			\

#define set_it_insn_type_nonvoid(type, failret)		\
      inst.it_insn_type = type;				\
      if (handle_it_state () == FAIL)			\

#define set_it_insn_type_last()				\
      if (inst.cond == COND_ALWAYS)			\
	set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
	set_it_insn_type (INSIDE_IT_LAST_INSN);		\
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */

walk_no_bignums (symbolS * sp)
  if (symbol_get_value_expression (sp)->X_op == O_big)

  if (symbol_get_value_expression (sp)->X_add_symbol)
    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	    || (symbol_get_value_expression (sp)->X_op_symbol
		&& walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
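/* A sketch of how the prefix modes are meant to be used (the example operand
   strings are hypothetical, not from the original source):

     GE_NO_PREFIX      - e.g. "r0, 4"   : no '#' (or '$') prefix is expected.
     GE_IMM_PREFIX     - e.g. "#4"      : the prefix is required outside unified syntax.
     GE_OPT_PREFIX     - "4" or "#4"    : the prefix is accepted but optional.
     GE_OPT_PREFIX_BIG - as GE_OPT_PREFIX, but 64-bit bignum values are also
			 allowed (for Neon VMOV/VMVN immediates).  */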
my_get_expression (expressionS * ep, char ** str, int prefix_mode)

  /* In unified syntax, all prefixes are optional.  */
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode

    case GE_NO_PREFIX: break;
      if (!is_immediate_prefix (**str))
	  inst.error = _("immediate expression requires a # prefix");
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))

  memset (ep, 0, sizeof (expressionS));

  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  && (walk_no_bignums (ep->X_add_symbol)
	      && walk_no_bignums (ep->X_op_symbol))))))
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  *str = input_line_pointer;
  input_line_pointer = save_in;
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
md_atof (int type, char * litP, int * sizeP)

  LITTLENUM_TYPE words[MAX_LITTLENUMS];

      return _("Unrecognized or unsupported floating point constant");

  t = atof_ieee (input_line_pointer, type, words);
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian)
      for (i = 0; i < prec; i++)
	  md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	  litP += sizeof (LITTLENUM_TYPE);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	for (i = prec - 1; i >= 0; i--)
	    md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += sizeof (LITTLENUM_TYPE);

	/* For a 4 byte float the order of elements in `words' is 1 0.
	   For an 8 byte float the order is 1 0 3 2.  */
	for (i = 0; i < prec; i += 2)
	    md_number_to_chars (litP, (valueT) words[i + 1],
				sizeof (LITTLENUM_TYPE));
	    md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
				(valueT) words[i], sizeof (LITTLENUM_TYPE));
	    litP += 2 * sizeof (LITTLENUM_TYPE);
/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */

md_operand (expressionS * exp)
  if (in_my_get_expression)
    exp->X_op = O_illegal;

/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */

immediate_for_directive (int *val)
  exp.X_op = O_illegal;

  if (is_immediate_prefix (*input_line_pointer))
      input_line_pointer++;

  if (exp.X_op != O_constant)
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();

  *val = exp.X_add_number;
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)

  if (!ISALPHA (*p) || !is_name_beginner (*p))
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
		    enum arm_reg_type type)
  /* Alternative syntaxes are accepted for a few register classes.  */
      /* Generic coprocessor register names are allowed for these.  */
      if (reg && reg->type == REG_TYPE_CN)
	/* For backward compatibility, a bare number is valid here.  */
	unsigned long processor = strtoul (start, ccp, 10);
	if (*ccp != start && processor <= 15)

    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
	 instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
   return value is the register number or FAIL.  */

arm_reg_parse (char **ccp, enum arm_reg_type type)
  struct reg_entry *reg = arm_reg_parse_multi (ccp);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))

  if (reg && reg->type == type)

  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
/* Parse a Neon type specifier. *STR should point at the leading '.'
   character. Does no verification at this stage that the type fits the opcode

   Can all be legally parsed by this function.

   Fills in neon_type struct pointer with parsed information, and updates STR
   to point after the parsed type specifier. Returns SUCCESS if this was a legal
   type, FAIL if not.  */
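/* For illustration (hypothetical suffixes of the kind this parser accepts):
   ".i32", ".f", ".s8.u16" and ".p8.i16.f32" are all well formed.  Each
   dot-separated element is an optional type letter (i/f/p/s/u) followed by an
   optional size in bits, and ".f" alone is read as ".f32".  */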
parse_neon_type (struct neon_type *type, char **str)

  while (type->elems < NEON_MAX_TYPE_ELS)
      enum neon_el_type thistype = NT_untyped;
      unsigned thissize = -1u;

	  /* Just a size without an explicit type.  */

	  switch (TOLOWER (*ptr))
	    case 'i': thistype = NT_integer; break;
	    case 'f': thistype = NT_float; break;
	    case 'p': thistype = NT_poly; break;
	    case 's': thistype = NT_signed; break;
	    case 'u': thistype = NT_unsigned; break;
	      thistype = NT_float;

	      as_bad (_("unexpected character `%c' in type specifier"), *ptr);

	  /* .f is an abbreviation for .f32.  */
	  if (thistype == NT_float && !ISDIGIT (*ptr))

	  thissize = strtoul (ptr, &ptr, 10);

	  if (thissize != 8 && thissize != 16 && thissize != 32
	      as_bad (_("bad size %d in type specifier"), thissize);

	  type->el[type->elems].type = thistype;
	  type->el[type->elems].size = thissize;

  /* Empty/missing type is not a successful parse.  */
  if (type->elems == 0)
/* Errors may be set multiple times during parsing or bit encoding
   (particularly in the Neon bits), but usually the earliest error which is set
   will be the most meaningful. Avoid overwriting it with later (cascading)
   errors by calling this function.  */

first_error (const char *err)

/* Parse a single type, e.g. ".s32", leading period included.  */
parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
  struct neon_type optype;

  if (parse_neon_type (&optype, &str) == SUCCESS)
      if (optype.elems == 1)
	*vectype = optype.el[0];
	  first_error (_("only one type should be specified for operand"));
    first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
/* Parse either a register or a scalar, with an optional type. Return the
   register number, and optionally fill in the actual type of the register
   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
   type/index information in *TYPEINFO.  */

parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type != reg->type)

  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
      if ((atype.defined & NTA_HASTYPE) != 0)
	  first_error (_("can't redefine type for operand"));

      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;

  if (skip_past_char (&str, '[') == SUCCESS)
      if (type != REG_TYPE_VFD)
	  first_error (_("only D registers may be indexed"));

      if ((atype.defined & NTA_HASINDEX) != 0)
	  first_error (_("can't change index for operand"));

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	      first_error (_("constant expression required"));

	  if (skip_past_char (&str, ']') == FAIL)

	  atype.index = exp.X_add_number;
/* Like arm_reg_parse, but allow the following extra features:
    - If RTYPE is non-zero, return the (possibly restricted) type of the
      register (e.g. Neon double or quad reg when either has been requested).
    - If this is a Neon vector type with additional type information, fill
      in the struct pointed to by VECTYPE (if non-NULL).
   This function will fault on encountering a scalar.  */
arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
  struct neon_typed_alias atype;

  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);

  /* Do not allow a scalar (reg+index) to parse as a register.  */
  if ((atype.defined & NTA_HASINDEX) != 0)
      first_error (_("register operand expected, but got scalar"));

    *vectype = atype.eltype;

#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
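/* Worked example (illustrative): parse_scalar () below encodes the scalar
   d5[2] as 5 * 16 + 2 = 0x52; NEON_SCALAR_REG (0x52) then recovers 5 and
   NEON_SCALAR_INDEX (0x52) recovers 2.  */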
/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking. So, we
   just do easy checks here, and do further checks later.  */

parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
  struct neon_typed_alias atype;

  reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);

  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)

  if (atype.index == NEON_ALL_LANES)
      first_error (_("scalar must have an index"));
  else if (atype.index >= 64 / elsize)
      first_error (_("scalar index out of range"));

    *type = atype.eltype;

  return reg * 16 + atype.index;

/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
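/* Worked example (illustrative): for the operand "{r0-r3, lr}" the loop below
   accumulates 0x000f for r0-r3 and then sets bit 14 for lr, returning the
   bitmask 0x400f.  */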
parse_reg_list (char ** strp)
  char * str = * strp;

      /* We come back here if we get ranges concatenated by '+' or '|'.  */

	      if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
		  first_error (_(reg_expected_msgs[REG_TYPE_RN]));

		      first_error (_("bad range in register list"));

		  for (i = cur_reg + 1; i < reg; i++)
		      if (range & (1 << i))
			  (_("Warning: duplicated register (r%d) in register list"),

	      if (range & (1 << reg))
		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
	      else if (reg <= cur_reg)
		as_tsktsk (_("Warning: register range not in ascending order"));

	  while (skip_past_comma (&str) != FAIL
		 || (in_range = 1, *str++ == '-'));

	      first_error (_("missing `}'"));

	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))

	  if (exp.X_op == O_constant)
	      if (exp.X_add_number
		  != (exp.X_add_number & 0x0000ffff))
		  inst.error = _("invalid register mask");

	      if ((range & exp.X_add_number) != 0)
		  int regno = range & exp.X_add_number;

		  regno = (1 << regno) - 1;
		    (_("Warning: duplicated register (r%d) in register list"),

	      range |= exp.X_add_number;

	      if (inst.reloc.type != 0)
		  inst.error = _("expression too complex");

	      memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
	      inst.reloc.type = BFD_RELOC_ARM_MULTI;
	      inst.reloc.pc_rel = 0;

      if (*str == '|' || *str == '+')

  while (another_range);

/* Types of registers in a list.  */
/* Parse a VFP register list.  If the string is invalid return FAIL.
   Otherwise return the number of registers, and set PBASE to the first
   register.  Parses registers of type ETYPE.
   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
     - Q registers can be used to specify pairs of D registers
     - { } can be omitted from around a singleton register list
	 FIXME: This is not implemented, as it would require backtracking in
	 some cases.
	 This could be done (the meaning isn't really ambiguous), but doesn't
	 fit in well with the current parsing framework.
     - 32 D registers may be used (also true for VFPv3).
   FIXME: Types are ignored in these register lists, which is probably a
   bug.  */
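/* For illustration (hypothetical operand): "{d4-d7}" parsed with
   REGLIST_VFP_D returns 4 and sets *PBASE to 4, since the list names the four
   consecutive registers d4..d7.  */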
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  unsigned long mask = 0;

      inst.error = _("expecting {");

      regtype = REG_TYPE_VFS;
      regtype = REG_TYPE_VFD;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;

  if (etype != REGLIST_VFP_S)
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,

  base_reg = max_regs;

      int setmask = 1, addregs = 1;

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (new_base == FAIL)
	  first_error (_(reg_expected_msgs[regtype]));

      if (new_base >= max_regs)
	  first_error (_("register out of range in list"));

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	  first_error (_("invalid register list"));

      if ((mask >> new_base) != 0 && ! warned)
	  as_tsktsk (_("register list not in ascending order"));

      mask |= setmask << new_base;

      if (*str == '-') /* We have the start of a range expression */
	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      inst.error = gettext (reg_expected_msgs[regtype]);

	  if (high_range >= max_regs)
	      first_error (_("register out of range in list"));

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	      inst.error = _("register range not in ascending order");

	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	      if (mask & (setmask << new_base))
		  inst.error = _("invalid register list");

	      mask |= setmask << new_base;

  while (skip_past_comma (&str) != FAIL);

  /* Sanity check -- should have raised a parse error above.  */
  if (count == 0 || count > max_regs)

  /* Final test -- the registers must be consecutive.  */
  for (i = 0; i < count; i++)
      if ((mask & (1u << i)) == 0)
	  inst.error = _("non-contiguous register range");
1838 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
1846 if (a
->defined
!= b
->defined
)
1849 if ((a
->defined
& NTA_HASTYPE
) != 0
1850 && (a
->eltype
.type
!= b
->eltype
.type
1851 || a
->eltype
.size
!= b
->eltype
.size
))
1854 if ((a
->defined
& NTA_HASINDEX
) != 0
1855 && (a
->index
!= b
->index
))
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).
   The type of the list elements is put in *ELTYPE, if non-NULL.  */

#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
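/* Worked example (illustrative): a list such as "{d0[2], d1[2]}" has base
   register 0, lane 2, stride 1 and length 2, so the value returned below is
   2 | ((1 - 1) << 4) | ((2 - 1) << 5) = 0x22; NEON_LANE, NEON_REG_STRIDE and
   NEON_REGLIST_LENGTH then recover 2, 1 and 2 respectively.  */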
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   struct neon_type_el *eltype)
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;

  if (skip_past_char (&ptr, '{') == SUCCESS)

      struct neon_typed_alias atype;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

	  first_error (_(reg_expected_msgs[rtype]));

	  if (rtype == REG_TYPE_NQ)

      else if (reg_incr == -1)
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	      first_error (_(incr_error));

      else if (getreg != base_reg + reg_incr * count)
	  first_error (_(incr_error));

      if (! neon_alias_types_same (&atype, &firsttype))
	  first_error (_(type_error));
      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list.  */
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;

	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	      first_error (_(type_error));

	  else if (reg_incr != 1)
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));

	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	      first_error (_(reg_expected_msgs[rtype]));

	  if (! neon_alias_types_same (&htype, &firsttype))
	      first_error (_(type_error));

	  count += hireg + dregs - getreg;

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)

      if ((atype.defined & NTA_HASINDEX) != 0)
	  else if (lane != atype.index)
	      first_error (_(type_error));

      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	  first_error (_(type_error));

  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
    lane = NEON_INTERLEAVE_LANES;

  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
      first_error (_("error parsing element/structure list"));

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
      first_error (_("expected }"));

    *eltype = firsttype.eltype;

  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
/* Parse an explicit relocation suffix on an expression.  This is
   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
   arm_reloc_hsh contains no entries, so this function can only
   succeed if there is no () after the word.  Returns -1 on error,
   BFD_RELOC_UNUSED if there wasn't any suffix.  */

parse_reloc (char **str)
  struct reloc_entry *r;

    return BFD_RELOC_UNUSED;

  while (*q && *q != ')' && *q != ',')

  if ((r = (struct reloc_entry *)
       hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
/* Directives: register aliases.  */

static struct reg_entry *
insert_reg_alias (char *str, int number, int type)
  struct reg_entry *new_reg;

  if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
      if (new_reg->builtin)
	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);

      /* Only warn about a redefinition if it's not defined as the
	 same register.  */
      else if (new_reg->number != number || new_reg->type != type)
	as_warn (_("ignoring redefinition of register alias '%s'"), str);

  name = xstrdup (str);
  new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));

  new_reg->name = name;
  new_reg->number = number;
  new_reg->type = type;
  new_reg->builtin = FALSE;
  new_reg->neon = NULL;

  if (hash_insert (arm_reg_hsh, name, (void *) new_reg))

insert_neon_reg_alias (char *str, int number, int type,
		       struct neon_typed_alias *atype)
  struct reg_entry *reg = insert_reg_alias (str, number, type);

      first_error (_("attempt to redefine typed alias"));

      reg->neon = (struct neon_typed_alias *)
	  xmalloc (sizeof (struct neon_typed_alias));
      *reg->neon = *atype;
/* Look for the .req directive.  This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.  */
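/* For illustration (hypothetical usage in assembly source):

       acc .req r4          @ "acc" becomes an alias for r4
       add acc, acc, r1     @ equivalent to: add r4, r4, r1  */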
create_register_alias (char * newname, char *p)
  struct reg_entry *old;
  char *oldname, *nbuf;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  if (strncmp (oldname, " .req ", 6) != 0)

  if (*oldname == '\0')

  old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  newname = original_case_string;
  nlen = strlen (newname);

  nbuf = (char *) alloca (nlen + 1);
  memcpy (nbuf, newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
      for (p = nbuf; *p; p++)

      if (strncmp (nbuf, newname, nlen))
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)

      for (p = nbuf; *p; p++)

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
/* Create a Neon typed/indexed register alias using directives, e.g.:

   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types. Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32  */
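/* For illustration (hypothetical usage of the directives handled below):

       x  .dn  d2.f32        @ "x" is d2 with an implied .f32 type
       y  .dn  d3.f32[1]     @ "y" is scalar d3[1], element type .f32
       vmul x, x, y          @ the type and index come from the aliases  */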
create_neon_reg_alias (char *newname, char *p)
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
      as_bad (_("bad type for register"));

  if (basereg == NULL)
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	  as_bad (_("expression must be constant"));

      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2

    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	  as_bad (_("can't redefine the type of a register alias"));

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	  as_bad (_("you must specify a single type only"));

      typeinfo.eltype = ntype.el[0];

  if (skip_past_char (&p, '[') == SUCCESS)
      /* We got a scalar index.  */
      if (typeinfo.defined & NTA_HASINDEX)
	  as_bad (_("can't redefine the index of a scalar alias"));

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	  as_bad (_("scalar index must be constant"));

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	  as_bad (_("expecting ]"));

  namelen = nameend - newname;
  namebuf = (char *) alloca (namelen + 1);
  strncpy (namebuf, newname, namelen);
  namebuf[namelen] = '\0';

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  */

s_req (int a ATTRIBUTE_UNUSED)
  as_bad (_("invalid syntax for .req directive"));

s_dn (int a ATTRIBUTE_UNUSED)
  as_bad (_("invalid syntax for .dn directive"));

s_qn (int a ATTRIBUTE_UNUSED)
  as_bad (_("invalid syntax for .qn directive"));
/* The .unreq directive deletes an alias which was previously defined
   by .req.  For example:

       my_alias .req r11
       .unreq my_alias	*/
s_unreq (int a ATTRIBUTE_UNUSED)
  name = input_line_pointer;

  while (*input_line_pointer != 0
	 && *input_line_pointer != ' '
	 && *input_line_pointer != '\n')
    ++input_line_pointer;

  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

    as_bad (_("invalid syntax for .unreq directive"));

      struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,

	as_bad (_("unknown register alias '%s'"), name);
      else if (reg->builtin)
	as_warn (_("ignoring attempt to undefine built-in register '%s'"),

	  hash_delete (arm_reg_hsh, name, FALSE);
	  free ((char *) reg->name);

	  /* Also locate the all upper case and all lower case versions.
	     Do not complain if we cannot find one or the other as it
	     was probably deleted above.  */

	  nbuf = strdup (name);
	  for (p = nbuf; *p; p++)
	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
	      hash_delete (arm_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);

	  for (p = nbuf; *p; p++)
	  reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
	      hash_delete (arm_reg_hsh, nbuf, FALSE);
	      free ((char *) reg->name);

  *input_line_pointer = saved_char;
  demand_empty_rest_of_line ();
/* Directives: Instruction set selection.  */

/* This code is to handle mapping symbols as defined in the ARM ELF spec.
   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
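/* For illustration (not from the spec text above): in a section that starts
   with ARM code, is followed by a literal pool, and ends with Thumb code, the
   assembler emits mapping symbols $a (at the ARM code), $d (at the pool data)
   and $t (at the Thumb code) so that disassemblers and linkers can tell the
   three regions apart.  */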
/* Create a new mapping symbol for the transition to STATE.  */

make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
  const char * symname;

      type = BSF_NO_FLAGS;
      type = BSF_NO_FLAGS;
      type = BSF_NO_FLAGS;

  symbolP = symbol_new (symname, now_seg, value, frag);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);

      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.  */

      know (frag->tc_frag_data.first_map == NULL);
      frag->tc_frag_data.first_map = symbolP;

  if (frag->tc_frag_data.last_map != NULL)
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      /* If .fill or other data filling directive generates zero sized data,
	 the mapping symbol for the following code will have the same value
	 as the one generated for the data filling directive.  In this case,
	 we replace the old symbol with the new one at the same address.  */
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);

  frag->tc_frag_data.last_map = symbolP;
/* We must sometimes convert a region marked as code to data during
   code alignment, if an odd number of bytes have to be padded.  The
   code mapping symbol is pushed to an aligned address.  */

insert_data_mapping_symbol (enum mstate state,
			    valueT value, fragS *frag, offsetT bytes)
  /* If there was already a mapping symbol, remove it.  */
  if (frag->tc_frag_data.last_map != NULL
      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
      symbolS *symp = frag->tc_frag_data.last_map;

	  know (frag->tc_frag_data.first_map == symp);
	  frag->tc_frag_data.first_map = NULL;
      frag->tc_frag_data.last_map = NULL;
      symbol_remove (symp, &symbol_rootP, &symbol_lastP);

  make_mapping_symbol (MAP_DATA, value, frag);
  make_mapping_symbol (state, value + bytes, frag);
static void mapping_state_2 (enum mstate state, int max_chars);

/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

mapping_state (enum mstate state)
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

#define TRANSITION(from, to) (mapstate == (from) && state == (to))

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
  else if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later in the next else.  */
  else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	   || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
      /* Only add the symbol if the offset is > 0:
	 if we're at the first frag, check its size > 0;
	 if we're not at the first frag, then for sure
	 the offset is > 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);

  mapping_state_2 (state, 0);
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
#else
#define mapping_state(x) ((void)0)
#define mapping_state_2(x, y) ((void)0)
#endif
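/* Illustrative sketch only (not part of the assembler): how emitting
   bytes is typically bracketed by mapping_state calls.  The variable
   names used here are hypothetical.  */
#if 0
  mapping_state (MAP_ARM);			/* "$a" covers what follows.  */
  md_number_to_chars (frag_more (4), arm_insn_word, 4);

  mapping_state (MAP_DATA);			/* "$d" covers the literal.  */
  md_number_to_chars (frag_more (4), literal_word, 4);
#endif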
/* Find the real, Thumb encoded start of a Thumb function.  */

static symbolS *
find_real_start (symbolS * symbolP)
{
  char *       real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS *    new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.  */
#define STUB_NAME ".real_start_of"

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location.  These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = ACONCAT ((STUB_NAME, name, NULL));
  new_target = symbol_find (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
2641 opcode_select (int width
)
2648 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2649 as_bad (_("selected processor does not support THUMB opcodes"));
2652 /* No need to force the alignment, since we will have been
2653 coming from ARM mode, which is word-aligned. */
2654 record_alignment (now_seg
, 1);
2661 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2662 as_bad (_("selected processor does not support ARM opcodes"));
2667 frag_align (2, 0, 0);
2669 record_alignment (now_seg
, 1);
2674 as_bad (_("invalid instruction size selected (%d)"), width
);
2679 s_arm (int ignore ATTRIBUTE_UNUSED
)
2682 demand_empty_rest_of_line ();
2686 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2689 demand_empty_rest_of_line ();
2693 s_code (int unused ATTRIBUTE_UNUSED
)
2697 temp
= get_absolute_expression ();
2702 opcode_select (temp
);
2706 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2711 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2713 /* If we are not already in thumb mode go into it, EVEN if
2714 the target processor does not support thumb instructions.
2715 This is used by gcc/config/arm/lib1funcs.asm for example
2716 to compile interworking support functions even if the
2717 target processor should not support interworking. */
2721 record_alignment (now_seg
, 1);
2724 demand_empty_rest_of_line ();
2728 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
2732 /* The following label is the name/address of the start of a Thumb function.
2733 We need to know this for the interworking support. */
2734 label_is_thumb_function_name
= TRUE
;
2737 /* Perform a .set directive, but also mark the alias as
2738 being a thumb function. */
2741 s_thumb_set (int equiv
)
2743 /* XXX the following is a duplicate of the code for s_set() in read.c
2744 We cannot just call that code as we need to get at the symbol that
2751 /* Especial apologies for the random logic:
2752 This just grew, and could be parsed much more simply!
2754 name
= input_line_pointer
;
2755 delim
= get_symbol_end ();
2756 end_name
= input_line_pointer
;
2759 if (*input_line_pointer
!= ',')
2762 as_bad (_("expected comma after name \"%s\""), name
);
2764 ignore_rest_of_line ();
2768 input_line_pointer
++;
2771 if (name
[0] == '.' && name
[1] == '\0')
2773 /* XXX - this should not happen to .thumb_set. */
2777 if ((symbolP
= symbol_find (name
)) == NULL
2778 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
2781 /* When doing symbol listings, play games with dummy fragments living
2782 outside the normal fragment chain to record the file and line info
2784 if (listing
& LISTING_SYMBOLS
)
2786 extern struct list_info_struct
* listing_tail
;
2787 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
2789 memset (dummy_frag
, 0, sizeof (fragS
));
2790 dummy_frag
->fr_type
= rs_fill
;
2791 dummy_frag
->line
= listing_tail
;
2792 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
2793 dummy_frag
->fr_symbol
= symbolP
;
2797 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
2800 /* "set" symbols are local unless otherwise specified. */
2801 SF_SET_LOCAL (symbolP
);
2802 #endif /* OBJ_COFF */
2803 } /* Make a new symbol. */
2805 symbol_table_insert (symbolP
);
2810 && S_IS_DEFINED (symbolP
)
2811 && S_GET_SEGMENT (symbolP
) != reg_section
)
2812 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
2814 pseudo_set (symbolP
);
2816 demand_empty_rest_of_line ();
2818 /* XXX Now we come to the Thumb specific bit of code. */
2820 THUMB_SET_FUNC (symbolP
, 1);
2821 ARM_SET_THUMB (symbolP
, 1);
2822 #if defined OBJ_ELF || defined OBJ_COFF
2823 ARM_SET_INTERWORK (symbolP
, support_interwork
);
/* Directives: Mode selection.  */

/* .syntax [unified|divided] - choose the new unified syntax
   (same for Arm and Thumb encoding, modulo slight differences in what
   can be represented) or the old divergent syntax for each mode.  */

static void
s_syntax (int unused ATTRIBUTE_UNUSED)
{
  char *name, delim;

  name = input_line_pointer;
  delim = get_symbol_end ();

  if (!strcasecmp (name, "unified"))
    unified_syntax = TRUE;
  else if (!strcasecmp (name, "divided"))
    unified_syntax = FALSE;
  else
    {
      as_bad (_("unrecognized syntax mode \"%s\""), name);
      return;
    }
  *input_line_pointer = delim;
  demand_empty_rest_of_line ();
}
2853 /* Directives: sectioning and alignment. */
2855 /* Same as s_align_ptwo but align 0 => align 2. */
2858 s_align (int unused ATTRIBUTE_UNUSED
)
2863 long max_alignment
= 15;
2865 temp
= get_absolute_expression ();
2866 if (temp
> max_alignment
)
2867 as_bad (_("alignment too large: %d assumed"), temp
= max_alignment
);
2870 as_bad (_("alignment negative. 0 assumed."));
2874 if (*input_line_pointer
== ',')
2876 input_line_pointer
++;
2877 temp_fill
= get_absolute_expression ();
2889 /* Only make a frag if we HAVE to. */
2890 if (temp
&& !need_pass_2
)
2892 if (!fill_p
&& subseg_text_p (now_seg
))
2893 frag_align_code (temp
, 0);
2895 frag_align (temp
, (int) temp_fill
, 0);
2897 demand_empty_rest_of_line ();
2899 record_alignment (now_seg
, temp
);
2903 s_bss (int ignore ATTRIBUTE_UNUSED
)
2905 /* We don't support putting frags in the BSS segment, we fake it by
2906 marking in_bss, then looking at s_skip for clues. */
2907 subseg_set (bss_section
, 0);
2908 demand_empty_rest_of_line ();
2910 #ifdef md_elf_section_change_hook
2911 md_elf_section_change_hook ();
2916 s_even (int ignore ATTRIBUTE_UNUSED
)
2918 /* Never make frag if expect extra pass. */
2920 frag_align (1, 0, 0);
2922 record_alignment (now_seg
, 1);
2924 demand_empty_rest_of_line ();
/* Directives: Literal pools.  */

static literal_pool *
find_literal_pool (void)
{
  literal_pool * pool;

  for (pool = list_of_pools; pool != NULL; pool = pool->next)
    {
      if (pool->section == now_seg
	  && pool->sub_section == now_subseg)
	break;
    }

  return pool;
}

static literal_pool *
find_or_make_literal_pool (void)
{
  /* Next literal pool ID number.  */
  static unsigned int latest_pool_num = 1;
  literal_pool * pool;

  pool = find_literal_pool ();

  if (pool == NULL)
    {
      /* Create a new pool.  */
      pool = (literal_pool *) xmalloc (sizeof (* pool));

      pool->next_free_entry = 0;
      pool->section	    = now_seg;
      pool->sub_section	    = now_subseg;
      pool->next	    = list_of_pools;
      pool->symbol	    = NULL;

      /* Add it to the list.  */
      list_of_pools = pool;
    }

  /* New pools, and emptied pools, will have a NULL symbol.  */
  if (pool->symbol == NULL)
    {
      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
				    (valueT) 0, &zero_address_frag);
      pool->id = latest_pool_num ++;
    }

  return pool;
}
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  */

static int
add_to_lit_pool (void)
{
  literal_pool * pool;
  unsigned int entry;

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_constant)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_unsigned
	      == inst.reloc.exp.X_unsigned))
	break;

      if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
	  && (inst.reloc.exp.X_op == O_symbol)
	  && (pool->literals[entry].X_add_number
	      == inst.reloc.exp.X_add_number)
	  && (pool->literals[entry].X_add_symbol
	      == inst.reloc.exp.X_add_symbol)
	  && (pool->literals[entry].X_op_symbol
	      == inst.reloc.exp.X_op_symbol))
	break;
    }

  /* Do we need to create a new entry?  */
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      pool->literals[entry] = inst.reloc.exp;
      pool->next_free_entry += 1;
    }

  inst.reloc.exp.X_op	      = O_symbol;
  inst.reloc.exp.X_add_number = ((int) entry) * 4;
  inst.reloc.exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
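/* Illustrative sketch only (not part of the assembler): the effect of a
   successful add_to_lit_pool () on a load-constant pseudo instruction
   such as "ldr r0, =0x12345678".  The assertions restate the rewrite
   performed just above; the entry offset shown is an example value.  */
#if 0
  gas_assert (inst.reloc.exp.X_op == O_symbol);
  gas_assert (inst.reloc.exp.X_add_symbol == find_literal_pool ()->symbol);
  gas_assert ((inst.reloc.exp.X_add_number % 4) == 0);	/* entry * 4, e.g. 8.  */
#endif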
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value.  That's what these functions do.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.  */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.  */
{
  unsigned int name_length;
  char * preserved_copy_of_name;

  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  mapping_state (MAP_DATA);

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (2, 0, 0);

  record_alignment (now_seg, 2);

  sprintf (sym_name, "$$lit_\002%x", pool->id);

  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    /* First output the expression in the instruction to the pool.  */
    emit_expr (&(pool->literals[entry]), 4); /* .word  */

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3131 /* Forward declarations for functions below, in the MD interface
3133 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3134 static valueT
create_unwind_entry (int);
3135 static void start_unwind_section (const segT
, int);
3136 static void add_unwind_opcode (valueT
, int);
3137 static void flush_pending_unwind (void);
3139 /* Directives: Data. */
3142 s_arm_elf_cons (int nbytes
)
3146 #ifdef md_flush_pending_output
3147 md_flush_pending_output ();
3150 if (is_it_end_of_statement ())
3152 demand_empty_rest_of_line ();
3156 #ifdef md_cons_align
3157 md_cons_align (nbytes
);
3160 mapping_state (MAP_DATA
);
3164 char *base
= input_line_pointer
;
3168 if (exp
.X_op
!= O_symbol
)
3169 emit_expr (&exp
, (unsigned int) nbytes
);
3172 char *before_reloc
= input_line_pointer
;
3173 reloc
= parse_reloc (&input_line_pointer
);
3176 as_bad (_("unrecognized relocation suffix"));
3177 ignore_rest_of_line ();
3180 else if (reloc
== BFD_RELOC_UNUSED
)
3181 emit_expr (&exp
, (unsigned int) nbytes
);
3184 reloc_howto_type
*howto
= (reloc_howto_type
*)
3185 bfd_reloc_type_lookup (stdoutput
,
3186 (bfd_reloc_code_real_type
) reloc
);
3187 int size
= bfd_get_reloc_size (howto
);
3189 if (reloc
== BFD_RELOC_ARM_PLT32
)
3191 as_bad (_("(plt) is only valid on branch targets"));
3192 reloc
= BFD_RELOC_UNUSED
;
3197 as_bad (_("%s relocations do not fit in %d bytes"),
3198 howto
->name
, nbytes
);
3201 /* We've parsed an expression stopping at O_symbol.
3202 But there may be more expression left now that we
3203 have parsed the relocation marker. Parse it again.
3204 XXX Surely there is a cleaner way to do this. */
3205 char *p
= input_line_pointer
;
3207 char *save_buf
= (char *) alloca (input_line_pointer
- base
);
3208 memcpy (save_buf
, base
, input_line_pointer
- base
);
3209 memmove (base
+ (input_line_pointer
- before_reloc
),
3210 base
, before_reloc
- base
);
3212 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3214 memcpy (base
, save_buf
, p
- base
);
3216 offset
= nbytes
- size
;
3217 p
= frag_more ((int) nbytes
);
3218 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3219 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3224 while (*input_line_pointer
++ == ',');
3226 /* Put terminator back into stream. */
3227 input_line_pointer
--;
3228 demand_empty_rest_of_line ();
/* Emit an expression containing a 32-bit thumb instruction.
   Implementation based on put_thumb32_insn.  */

static void
emit_thumb32_expr (expressionS * exp)
{
  expressionS exp_high = *exp;

  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
  exp->X_add_number &= 0xffff;
  emit_expr (exp, (unsigned int) THUMB_SIZE);
}
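/* Illustrative sketch only (not part of the assembler): a 32-bit Thumb
   encoding is emitted high halfword first, which is what the shift and
   mask above achieve.  The encoding used here is just an example value.  */
#if 0
  unsigned long insn  = 0xf04f0000ul;		/* Example 32-bit encoding.  */
  unsigned int first  = (insn >> 16) & 0xffff;	/* 0xf04f, emitted first.  */
  unsigned int second = insn & 0xffff;		/* 0x0000, emitted second.  */
#endif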
/* Guess the instruction size based on the opcode.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3259 emit_insn (expressionS
*exp
, int nbytes
)
3263 if (exp
->X_op
== O_constant
)
3268 size
= thumb_insn_size (exp
->X_add_number
);
3272 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
	      as_bad (_(".inst.n operand too big. "
			"Use .inst.w instead"));
3280 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3281 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3283 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3285 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3286 emit_thumb32_expr (exp
);
3288 emit_expr (exp
, (unsigned int) size
);
3290 it_fsm_post_encode ();
3294 as_bad (_("cannot determine Thumb instruction size. " \
3295 "Use .inst.n/.inst.w instead"));
3298 as_bad (_("constant expression required"));
3303 /* Like s_arm_elf_cons but do not use md_cons_align and
3304 set the mapping state to MAP_ARM/MAP_THUMB. */
3307 s_arm_elf_inst (int nbytes
)
3309 if (is_it_end_of_statement ())
3311 demand_empty_rest_of_line ();
3315 /* Calling mapping_state () here will not change ARM/THUMB,
3316 but will ensure not to be in DATA state. */
3319 mapping_state (MAP_THUMB
);
3324 as_bad (_("width suffixes are invalid in ARM mode"));
3325 ignore_rest_of_line ();
3331 mapping_state (MAP_ARM
);
3340 if (! emit_insn (& exp
, nbytes
))
3342 ignore_rest_of_line ();
3346 while (*input_line_pointer
++ == ',');
3348 /* Put terminator back into stream. */
3349 input_line_pointer
--;
3350 demand_empty_rest_of_line ();
/* Parse a .rel31 directive.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
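/* Illustrative sketch only (not part of the assembler): the word built
   for .rel31 ends up holding a 31-bit self-relative offset, with the top
   bit taken from the directive's 0/1 argument.  The helper below is
   hypothetical and only restates that layout.  */
#if 0
static unsigned long
example_prel31_word (unsigned long target, unsigned long place,
		     unsigned long highbit /* 0 or 0x80000000.  */)
{
  return ((target - place) & 0x7fffffffUL) | highbit;
}
#endif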
3393 /* Directives: AEABI stack-unwind tables. */
3395 /* Parse an unwind_fnstart directive. Simply records the current location. */
3398 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3400 demand_empty_rest_of_line ();
3401 if (unwind
.proc_start
)
3403 as_bad (_("duplicate .fnstart directive"));
3407 /* Mark the start of the function. */
3408 unwind
.proc_start
= expr_build_dot ();
3410 /* Reset the rest of the unwind info. */
3411 unwind
.opcode_count
= 0;
3412 unwind
.table_entry
= NULL
;
3413 unwind
.personality_routine
= NULL
;
3414 unwind
.personality_index
= -1;
3415 unwind
.frame_size
= 0;
3416 unwind
.fp_offset
= 0;
3417 unwind
.fp_reg
= REG_SP
;
3419 unwind
.sp_restored
= 0;
3423 /* Parse a handlerdata directive. Creates the exception handling table entry
3424 for the function. */
3427 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3429 demand_empty_rest_of_line ();
3430 if (!unwind
.proc_start
)
3431 as_bad (MISSING_FNSTART
);
3433 if (unwind
.table_entry
)
3434 as_bad (_("duplicate .handlerdata directive"));
3436 create_unwind_entry (1);
3439 /* Parse an unwind_fnend directive. Generates the index table entry. */
3442 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3447 unsigned int marked_pr_dependency
;
3449 demand_empty_rest_of_line ();
3451 if (!unwind
.proc_start
)
3453 as_bad (_(".fnend directive without .fnstart"));
3457 /* Add eh table entry. */
3458 if (unwind
.table_entry
== NULL
)
3459 val
= create_unwind_entry (0);
3463 /* Add index table entry. This is two words. */
3464 start_unwind_section (unwind
.saved_seg
, 1);
3465 frag_align (2, 0, 0);
3466 record_alignment (now_seg
, 2);
3468 ptr
= frag_more (8);
3469 where
= frag_now_fix () - 8;
3471 /* Self relative offset of the function start. */
3472 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3473 BFD_RELOC_ARM_PREL31
);
3475 /* Indicate dependency on EHABI-defined personality routines to the
3476 linker, if it hasn't been done already. */
3477 marked_pr_dependency
3478 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3479 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3480 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3482 static const char *const name
[] =
3484 "__aeabi_unwind_cpp_pr0",
3485 "__aeabi_unwind_cpp_pr1",
3486 "__aeabi_unwind_cpp_pr2"
3488 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3489 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3490 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3491 |= 1 << unwind
.personality_index
;
3495 /* Inline exception table entry. */
3496 md_number_to_chars (ptr
+ 4, val
, 4);
3498 /* Self relative offset of the table entry. */
3499 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3500 BFD_RELOC_ARM_PREL31
);
3502 /* Restore the original section. */
3503 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3505 unwind
.proc_start
= NULL
;
3509 /* Parse an unwind_cantunwind directive. */
3512 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
3514 demand_empty_rest_of_line ();
3515 if (!unwind
.proc_start
)
3516 as_bad (MISSING_FNSTART
);
3518 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3519 as_bad (_("personality routine specified for cantunwind frame"));
3521 unwind
.personality_index
= -2;
3525 /* Parse a personalityindex directive. */
3528 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
3532 if (!unwind
.proc_start
)
3533 as_bad (MISSING_FNSTART
);
3535 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3536 as_bad (_("duplicate .personalityindex directive"));
3540 if (exp
.X_op
!= O_constant
3541 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
3543 as_bad (_("bad personality routine number"));
3544 ignore_rest_of_line ();
3548 unwind
.personality_index
= exp
.X_add_number
;
3550 demand_empty_rest_of_line ();
3554 /* Parse a personality directive. */
3557 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
3561 if (!unwind
.proc_start
)
3562 as_bad (MISSING_FNSTART
);
3564 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
3565 as_bad (_("duplicate .personality directive"));
3567 name
= input_line_pointer
;
3568 c
= get_symbol_end ();
3569 p
= input_line_pointer
;
3570 unwind
.personality_routine
= symbol_find_or_make (name
);
3572 demand_empty_rest_of_line ();
3576 /* Parse a directive saving core registers. */
3579 s_arm_unwind_save_core (void)
3585 range
= parse_reg_list (&input_line_pointer
);
3588 as_bad (_("expected register list"));
3589 ignore_rest_of_line ();
3593 demand_empty_rest_of_line ();
3595 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3596 into .unwind_save {..., sp...}. We aren't bothered about the value of
3597 ip because it is clobbered by calls. */
3598 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
3599 && (range
& 0x3000) == 0x1000)
3601 unwind
.opcode_count
--;
3602 unwind
.sp_restored
= 0;
3603 range
= (range
| 0x2000) & ~0x1000;
3604 unwind
.pending_offset
= 0;
  /* See if we can use the short opcodes.  These pop a block of up to 8
     registers starting with r4, plus maybe r14.  */
  for (n = 0; n < 8; n++)
    {
      /* Break at the first non-saved register.  */
      if ((range & (1 << (n + 4))) == 0)
	break;
    }

  /* See if there are any other bits set.  */
  if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
    {
      /* Use the long form.  */
      op = 0x8000 | ((range >> 4) & 0xfff);
      add_unwind_opcode (op, 2);
    }
  else
    {
      /* Use the short form.  */
      if (range & 0x4000)
	op = 0xa8; /* Pop r14.  */
      else
	op = 0xa0; /* Do not pop r14.  */
      op |= (n - 1);
      add_unwind_opcode (op, 1);
    }

  /* Pop r0-r3.  */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.  */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
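/* Illustrative examples only (not part of the assembler): the unwind
   opcodes the logic above would select for two .save register lists,
   assuming the short/long forms shown there.  */
#if 0
  unsigned int range1 = 0x40f0;	/* .save {r4-r7, lr}  */
  unsigned int range2 = 0x4050;	/* .save {r4, r6, lr} */
  /* range1 is a contiguous block from r4 plus r14, so the short form
     applies: one byte, 0xa8 | (4 - 1) == 0xab.  */
  /* range2 has a gap, so the long form is used: two bytes,
     0x8000 | ((range2 >> 4) & 0xfff) == 0x8405.  */
#endif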
3653 /* Parse a directive saving FPA registers. */
3656 s_arm_unwind_save_fpa (int reg
)
3662 /* Get Number of registers to transfer. */
3663 if (skip_past_comma (&input_line_pointer
) != FAIL
)
3666 exp
.X_op
= O_illegal
;
3668 if (exp
.X_op
!= O_constant
)
3670 as_bad (_("expected , <constant>"));
3671 ignore_rest_of_line ();
3675 num_regs
= exp
.X_add_number
;
3677 if (num_regs
< 1 || num_regs
> 4)
3679 as_bad (_("number of registers must be in the range [1:4]"));
3680 ignore_rest_of_line ();
3684 demand_empty_rest_of_line ();
3689 op
= 0xb4 | (num_regs
- 1);
3690 add_unwind_opcode (op
, 1);
3695 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
3696 add_unwind_opcode (op
, 2);
3698 unwind
.frame_size
+= num_regs
* 12;
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;

  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  unwind.frame_size += count * 8;
}
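/* Illustrative examples only (not part of the assembler): the opcodes
   the code above emits for two .vsave register lists.  */
#if 0
  /* .vsave {d8-d11}:  all registers below d16, one opcode:
       0xc900 | (8 << 4) | (4 - 1)  ==  0xc983.
     .vsave {d16-d17}: VFPv3 registers only, one opcode:
       0xc800 | (0 << 4) | (2 - 1)  ==  0xc801.  */
#endif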
3752 /* Parse a directive saving VFP registers for pre-ARMv6. */
3755 s_arm_unwind_save_vfp (void)
3761 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
);
3764 as_bad (_("expected register list"));
3765 ignore_rest_of_line ();
3769 demand_empty_rest_of_line ();
3774 op
= 0xb8 | (count
- 1);
3775 add_unwind_opcode (op
, 1);
3780 op
= 0xb300 | (reg
<< 4) | (count
- 1);
3781 add_unwind_opcode (op
, 2);
3783 unwind
.frame_size
+= count
* 8 + 4;
3787 /* Parse a directive saving iWMMXt data registers. */
3790 s_arm_unwind_save_mmxwr (void)
3798 if (*input_line_pointer
== '{')
3799 input_line_pointer
++;
3803 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3807 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3812 as_tsktsk (_("register list not in ascending order"));
3815 if (*input_line_pointer
== '-')
3817 input_line_pointer
++;
3818 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
3821 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
3824 else if (reg
>= hi_reg
)
3826 as_bad (_("bad register range"));
3829 for (; reg
< hi_reg
; reg
++)
3833 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3835 if (*input_line_pointer
== '}')
3836 input_line_pointer
++;
3838 demand_empty_rest_of_line ();
3840 /* Generate any deferred opcodes because we're going to be looking at
3842 flush_pending_unwind ();
3844 for (i
= 0; i
< 16; i
++)
3846 if (mask
& (1 << i
))
3847 unwind
.frame_size
+= 8;
3850 /* Attempt to combine with a previous opcode. We do this because gcc
3851 likes to output separate unwind directives for a single block of
3853 if (unwind
.opcode_count
> 0)
3855 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
3856 if ((i
& 0xf8) == 0xc0)
3859 /* Only merge if the blocks are contiguous. */
3862 if ((mask
& 0xfe00) == (1 << 9))
3864 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
3865 unwind
.opcode_count
--;
3868 else if (i
== 6 && unwind
.opcode_count
>= 2)
3870 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
3874 op
= 0xffff << (reg
- 1);
3876 && ((mask
& op
) == (1u << (reg
- 1))))
3878 op
= (1 << (reg
+ i
+ 1)) - 1;
3879 op
&= ~((1 << reg
) - 1);
3881 unwind
.opcode_count
-= 2;
3888 /* We want to generate opcodes in the order the registers have been
3889 saved, ie. descending order. */
3890 for (reg
= 15; reg
>= -1; reg
--)
3892 /* Save registers in blocks. */
3894 || !(mask
& (1 << reg
)))
3896 /* We found an unsaved reg. Generate opcodes to save the
3903 op
= 0xc0 | (hi_reg
- 10);
3904 add_unwind_opcode (op
, 1);
3909 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
3910 add_unwind_opcode (op
, 2);
3919 ignore_rest_of_line ();
3923 s_arm_unwind_save_mmxwcg (void)
3930 if (*input_line_pointer
== '{')
3931 input_line_pointer
++;
3935 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3939 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3945 as_tsktsk (_("register list not in ascending order"));
3948 if (*input_line_pointer
== '-')
3950 input_line_pointer
++;
3951 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
3954 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
3957 else if (reg
>= hi_reg
)
3959 as_bad (_("bad register range"));
3962 for (; reg
< hi_reg
; reg
++)
3966 while (skip_past_comma (&input_line_pointer
) != FAIL
);
3968 if (*input_line_pointer
== '}')
3969 input_line_pointer
++;
3971 demand_empty_rest_of_line ();
3973 /* Generate any deferred opcodes because we're going to be looking at
3975 flush_pending_unwind ();
3977 for (reg
= 0; reg
< 16; reg
++)
3979 if (mask
& (1 << reg
))
3980 unwind
.frame_size
+= 4;
3983 add_unwind_opcode (op
, 2);
3986 ignore_rest_of_line ();
3990 /* Parse an unwind_save directive.
3991 If the argument is non-zero, this is a .vsave directive. */
3994 s_arm_unwind_save (int arch_v6
)
3997 struct reg_entry
*reg
;
3998 bfd_boolean had_brace
= FALSE
;
4000 if (!unwind
.proc_start
)
4001 as_bad (MISSING_FNSTART
);
4003 /* Figure out what sort of save we have. */
4004 peek
= input_line_pointer
;
4012 reg
= arm_reg_parse_multi (&peek
);
4016 as_bad (_("register expected"));
4017 ignore_rest_of_line ();
4026 as_bad (_("FPA .unwind_save does not take a register list"));
4027 ignore_rest_of_line ();
4030 input_line_pointer
= peek
;
4031 s_arm_unwind_save_fpa (reg
->number
);
4034 case REG_TYPE_RN
: s_arm_unwind_save_core (); return;
4037 s_arm_unwind_save_vfp_armv6 ();
4039 s_arm_unwind_save_vfp ();
4041 case REG_TYPE_MMXWR
: s_arm_unwind_save_mmxwr (); return;
4042 case REG_TYPE_MMXWCG
: s_arm_unwind_save_mmxwcg (); return;
4045 as_bad (_(".unwind_save does not support this kind of register"));
4046 ignore_rest_of_line ();
4051 /* Parse an unwind_movsp directive. */
4054 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4060 if (!unwind
.proc_start
)
4061 as_bad (MISSING_FNSTART
);
4063 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4066 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4067 ignore_rest_of_line ();
4071 /* Optional constant. */
4072 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4074 if (immediate_for_directive (&offset
) == FAIL
)
4080 demand_empty_rest_of_line ();
4082 if (reg
== REG_SP
|| reg
== REG_PC
)
4084 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4088 if (unwind
.fp_reg
!= REG_SP
)
4089 as_bad (_("unexpected .unwind_movsp directive"));
4091 /* Generate opcode to restore the value. */
4093 add_unwind_opcode (op
, 1);
4095 /* Record the information for later. */
4096 unwind
.fp_reg
= reg
;
4097 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4098 unwind
.sp_restored
= 1;
4101 /* Parse an unwind_pad directive. */
4104 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4108 if (!unwind
.proc_start
)
4109 as_bad (MISSING_FNSTART
);
4111 if (immediate_for_directive (&offset
) == FAIL
)
4116 as_bad (_("stack increment must be multiple of 4"));
4117 ignore_rest_of_line ();
4121 /* Don't generate any opcodes, just record the details for later. */
4122 unwind
.frame_size
+= offset
;
4123 unwind
.pending_offset
+= offset
;
4125 demand_empty_rest_of_line ();
4128 /* Parse an unwind_setfp directive. */
4131 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4137 if (!unwind
.proc_start
)
4138 as_bad (MISSING_FNSTART
);
4140 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4141 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4144 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4146 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4148 as_bad (_("expected <reg>, <reg>"));
4149 ignore_rest_of_line ();
4153 /* Optional constant. */
4154 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4156 if (immediate_for_directive (&offset
) == FAIL
)
4162 demand_empty_rest_of_line ();
4164 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
      as_bad (_("register must be either sp or set by a previous "
		"unwind_movsp directive"));
4171 /* Don't generate any opcodes, just record the information for later. */
4172 unwind
.fp_reg
= fp_reg
;
4174 if (sp_reg
== REG_SP
)
4175 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4177 unwind
.fp_offset
-= offset
;
4180 /* Parse an unwind_raw directive. */
4183 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4186 /* This is an arbitrary limit. */
4187 unsigned char op
[16];
4190 if (!unwind
.proc_start
)
4191 as_bad (MISSING_FNSTART
);
4194 if (exp
.X_op
== O_constant
4195 && skip_past_comma (&input_line_pointer
) != FAIL
)
4197 unwind
.frame_size
+= exp
.X_add_number
;
4201 exp
.X_op
= O_illegal
;
4203 if (exp
.X_op
!= O_constant
)
4205 as_bad (_("expected <offset>, <opcode>"));
4206 ignore_rest_of_line ();
4212 /* Parse the opcode. */
4217 as_bad (_("unwind opcode too long"));
4218 ignore_rest_of_line ();
4220 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4222 as_bad (_("invalid unwind opcode"));
4223 ignore_rest_of_line ();
4226 op
[count
++] = exp
.X_add_number
;
4228 /* Parse the next byte. */
4229 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4235 /* Add the opcode bytes in reverse order. */
4237 add_unwind_opcode (op
[count
], 1);
4239 demand_empty_rest_of_line ();
4243 /* Parse a .eabi_attribute directive. */
4246 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4248 int tag
= s_vendor_attribute (OBJ_ATTR_PROC
);
4250 if (tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4251 attributes_set_explicitly
[tag
] = 1;
4253 #endif /* OBJ_ELF */
4255 static void s_arm_arch (int);
4256 static void s_arm_object_arch (int);
4257 static void s_arm_cpu (int);
4258 static void s_arm_fpu (int);
4263 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4270 if (exp
.X_op
== O_symbol
)
4271 exp
.X_op
= O_secrel
;
4273 emit_expr (&exp
, 4);
4275 while (*input_line_pointer
++ == ',');
4277 input_line_pointer
--;
4278 demand_empty_rest_of_line ();
4282 /* This table describes all the machine specific pseudo-ops the assembler
4283 has to support. The fields are:
4284 pseudo-op name without dot
4285 function to call to execute this pseudo-op
4286 Integer arg to pass to the function. */
4288 const pseudo_typeS md_pseudo_table
[] =
4290 /* Never called because '.req' does not start a line. */
4291 { "req", s_req
, 0 },
4292 /* Following two are likewise never called. */
4295 { "unreq", s_unreq
, 0 },
4296 { "bss", s_bss
, 0 },
4297 { "align", s_align
, 0 },
4298 { "arm", s_arm
, 0 },
4299 { "thumb", s_thumb
, 0 },
4300 { "code", s_code
, 0 },
4301 { "force_thumb", s_force_thumb
, 0 },
4302 { "thumb_func", s_thumb_func
, 0 },
4303 { "thumb_set", s_thumb_set
, 0 },
4304 { "even", s_even
, 0 },
4305 { "ltorg", s_ltorg
, 0 },
4306 { "pool", s_ltorg
, 0 },
4307 { "syntax", s_syntax
, 0 },
4308 { "cpu", s_arm_cpu
, 0 },
4309 { "arch", s_arm_arch
, 0 },
4310 { "object_arch", s_arm_object_arch
, 0 },
4311 { "fpu", s_arm_fpu
, 0 },
4313 { "word", s_arm_elf_cons
, 4 },
4314 { "long", s_arm_elf_cons
, 4 },
4315 { "inst.n", s_arm_elf_inst
, 2 },
4316 { "inst.w", s_arm_elf_inst
, 4 },
4317 { "inst", s_arm_elf_inst
, 0 },
4318 { "rel31", s_arm_rel31
, 0 },
4319 { "fnstart", s_arm_unwind_fnstart
, 0 },
4320 { "fnend", s_arm_unwind_fnend
, 0 },
4321 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4322 { "personality", s_arm_unwind_personality
, 0 },
4323 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4324 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4325 { "save", s_arm_unwind_save
, 0 },
4326 { "vsave", s_arm_unwind_save
, 1 },
4327 { "movsp", s_arm_unwind_movsp
, 0 },
4328 { "pad", s_arm_unwind_pad
, 0 },
4329 { "setfp", s_arm_unwind_setfp
, 0 },
4330 { "unwind_raw", s_arm_unwind_raw
, 0 },
4331 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4335 /* These are used for dwarf. */
4339 /* These are used for dwarf2. */
4340 { "file", (void (*) (int)) dwarf2_directive_file
, 0 },
4341 { "loc", dwarf2_directive_loc
, 0 },
4342 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4344 { "extend", float_cons
, 'x' },
4345 { "ldouble", float_cons
, 'x' },
4346 { "packed", float_cons
, 'p' },
4348 {"secrel32", pe_directive_secrel
, 0},
4353 /* Parser functions used exclusively in instruction operands. */
/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional.  */

static int
parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
{
  expressionS exp;

  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  if (exp.X_op != O_constant)
    {
      inst.error = _("constant expression required");
      return FAIL;
    }

  if (exp.X_add_number < min || exp.X_add_number > max)
    {
      inst.error = _("immediate value out of range");
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
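/* Illustrative sketch only (not part of the assembler): typical use of
   parse_immediate while scanning an operand such as "#24".  The local
   names are hypothetical; the string pointer is advanced on success.  */
#if 0
  int rot;
  if (parse_immediate (&str, &rot, 0, 31, TRUE) == FAIL)
    return FAIL;
#endif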
4383 /* Less-generic immediate-value read function with the possibility of loading a
4384 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4385 instructions. Puts the result directly in inst.operands[i]. */
4388 parse_big_immediate (char **str
, int i
)
4393 my_get_expression (&exp
, &ptr
, GE_OPT_PREFIX_BIG
);
4395 if (exp
.X_op
== O_constant
)
4397 inst
.operands
[i
].imm
= exp
.X_add_number
& 0xffffffff;
4398 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4399 O_constant. We have to be careful not to break compilation for
4400 32-bit X_add_number, though. */
4401 if ((exp
.X_add_number
& ~0xffffffffl
) != 0)
4403 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4404 inst
.operands
[i
].reg
= ((exp
.X_add_number
>> 16) >> 16) & 0xffffffff;
4405 inst
.operands
[i
].regisimm
= 1;
4408 else if (exp
.X_op
== O_big
4409 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
> 32
4410 && LITTLENUM_NUMBER_OF_BITS
* exp
.X_add_number
<= 64)
4412 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4413 /* Bignums have their least significant bits in
4414 generic_bignum[0]. Make sure we put 32 bits in imm and
4415 32 bits in reg, in a (hopefully) portable way. */
4416 gas_assert (parts
!= 0);
4417 inst
.operands
[i
].imm
= 0;
4418 for (j
= 0; j
< parts
; j
++, idx
++)
4419 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4420 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4421 inst
.operands
[i
].reg
= 0;
4422 for (j
= 0; j
< parts
; j
++, idx
++)
4423 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4424 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4425 inst
.operands
[i
].regisimm
= 1;
4435 /* Returns the pseudo-register number of an FPA immediate constant,
4436 or FAIL if there isn't a valid constant here. */
4439 parse_fpa_immediate (char ** str
)
4441 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4447 /* First try and match exact strings, this is to guarantee
4448 that some formats will work even for cross assembly. */
4450 for (i
= 0; fp_const
[i
]; i
++)
4452 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
4456 *str
+= strlen (fp_const
[i
]);
4457 if (is_end_of_line
[(unsigned char) **str
])
4463 /* Just because we didn't get a match doesn't mean that the constant
4464 isn't valid, just that it is in a format that we don't
4465 automatically recognize. Try parsing it with the standard
4466 expression routines. */
4468 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
4470 /* Look for a raw floating point number. */
4471 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
4472 && is_end_of_line
[(unsigned char) *save_in
])
4474 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4476 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4478 if (words
[j
] != fp_values
[i
][j
])
4482 if (j
== MAX_LITTLENUMS
)
4490 /* Try and parse a more complex expression, this will probably fail
4491 unless the code uses a floating point prefix (eg "0f"). */
4492 save_in
= input_line_pointer
;
4493 input_line_pointer
= *str
;
4494 if (expression (&exp
) == absolute_section
4495 && exp
.X_op
== O_big
4496 && exp
.X_add_number
< 0)
4498 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4500 if (gen_to_words (words
, 5, (long) 15) == 0)
4502 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
4504 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
4506 if (words
[j
] != fp_values
[i
][j
])
4510 if (j
== MAX_LITTLENUMS
)
4512 *str
= input_line_pointer
;
4513 input_line_pointer
= save_in
;
4520 *str
= input_line_pointer
;
4521 input_line_pointer
= save_in
;
4522 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000.  */

static int
is_quarter_float (unsigned imm)
{
  int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
}
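/* Illustrative examples only (not part of the assembler): some
   single-precision bit patterns and what is_quarter_float returns for
   them.  Zero fails the check here and is handled separately by the
   caller.  */
#if 0
  /* 0x3f800000  (1.0f)    -> 1
     0x40000000  (2.0f)    -> 1
     0xbf800000  (-1.0f)   -> 1  (only the sign bit differs)
     0x3fc00000  (1.5f)    -> 1
     0x42c80000  (100.0f)  -> 0  (exponent outside the encodable range)
     0x3dcccccd  (0.1f)    -> 0  (low-order mantissa bits are set)
     0x00000000  (0.0f)    -> 0  */
#endif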
4536 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4537 0baBbbbbbc defgh000 00000000 00000000.
4538 The zero and minus-zero cases need special handling, since they can't be
4539 encoded in the "quarter-precision" float format, but can nonetheless be
4540 loaded as integer constants. */
4543 parse_qfloat_immediate (char **ccp
, int *immed
)
4547 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
4548 int found_fpchar
= 0;
4550 skip_past_char (&str
, '#');
4552 /* We must not accidentally parse an integer as a floating-point number. Make
4553 sure that the value we parse is not an integer by checking for special
4554 characters '.' or 'e'.
4555 FIXME: This is a horrible hack, but doing better is tricky because type
4556 information isn't in a very usable state at parse time. */
4558 skip_whitespace (fpnum
);
4560 if (strncmp (fpnum
, "0x", 2) == 0)
4564 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
4565 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
4575 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
4577 unsigned fpword
= 0;
4580 /* Our FP word must be 32 bits (single-precision FP). */
4581 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
4583 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
4587 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
4600 /* Shift operands. */
4603 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
4606 struct asm_shift_name
4609 enum shift_kind kind
;
4612 /* Third argument to parse_shift. */
4613 enum parse_shift_mode
4615 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
4616 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
4617 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
4618 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
4619 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
4622 /* Parse a <shift> specifier on an ARM data processing instruction.
4623 This has three forms:
4625 (LSL|LSR|ASL|ASR|ROR) Rs
4626 (LSL|LSR|ASL|ASR|ROR) #imm
4629 Note that ASL is assimilated to LSL in the instruction encoding, and
4630 RRX to ROR #0 (which cannot be written as such). */
4633 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
4635 const struct asm_shift_name
*shift_name
;
4636 enum shift_kind shift
;
4641 for (p
= *str
; ISALPHA (*p
); p
++)
4646 inst
.error
= _("shift expression expected");
4650 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
4653 if (shift_name
== NULL
)
4655 inst
.error
= _("shift expression expected");
4659 shift
= shift_name
->kind
;
4663 case NO_SHIFT_RESTRICT
:
4664 case SHIFT_IMMEDIATE
: break;
4666 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
4667 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
4669 inst
.error
= _("'LSL' or 'ASR' required");
4674 case SHIFT_LSL_IMMEDIATE
:
4675 if (shift
!= SHIFT_LSL
)
4677 inst
.error
= _("'LSL' required");
4682 case SHIFT_ASR_IMMEDIATE
:
4683 if (shift
!= SHIFT_ASR
)
4685 inst
.error
= _("'ASR' required");
4693 if (shift
!= SHIFT_RRX
)
4695 /* Whitespace can appear here if the next thing is a bare digit. */
4696 skip_whitespace (p
);
4698 if (mode
== NO_SHIFT_RESTRICT
4699 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
4701 inst
.operands
[i
].imm
= reg
;
4702 inst
.operands
[i
].immisreg
= 1;
4704 else if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_IMM_PREFIX
))
4707 inst
.operands
[i
].shift_kind
= shift
;
4708 inst
.operands
[i
].shifted
= 1;
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Convert to decoded value.  md_apply_fix will put it back.  */
      inst.reloc.exp.X_add_number
	= (((inst.reloc.exp.X_add_number << (32 - value))
	    | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
    }

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
  return SUCCESS;
}
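/* Illustrative sketch only (not part of the assembler): a worked example
   of the "#x, y" form and the decode performed above.  */
#if 0
  /* "mov r0, #4, 8" means the 8-bit constant 4 rotated right by 8:  */
  unsigned long imm8 = 4, rot = 8;
  unsigned long decoded = ((imm8 << (32 - rot)) | (imm8 >> rot)) & 0xffffffff;
  /* decoded == 0x04000000; md_apply_fix later re-derives the 8-bit
     constant and the rotation when building the instruction word.  */
#endif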
4784 /* Group relocation information. Each entry in the table contains the
4785 textual name of the relocation as may appear in assembler source
4786 and must end with a colon.
4787 Along with this textual name are the relocation codes to be used if
4788 the corresponding instruction is an ALU instruction (ADD or SUB only),
4789 an LDR, an LDRS, or an LDC. */
4791 struct group_reloc_table_entry
4802 /* Varieties of non-ALU group relocation. */
4809 static struct group_reloc_table_entry group_reloc_table
[] =
4810 { /* Program counter relative: */
4812 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
4817 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
4818 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
4819 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
4820 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
4822 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
4827 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
4828 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
4829 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
4830 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
4832 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
4833 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
4834 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
4835 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
4836 /* Section base relative */
4838 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
4843 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
4844 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
4845 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
4846 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
4848 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
4853 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
4854 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
4855 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
4856 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
4858 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
4859 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
4860 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
4861 BFD_RELOC_ARM_LDC_SB_G2
} }; /* LDC */
/* Given the address of a pointer pointing to the textual name of a group
   relocation as may appear in assembler source, attempt to find its details
   in group_reloc_table.  The pointer will be updated to the character after
   the trailing colon.  On failure, FAIL will be returned; SUCCESS
   otherwise.  On success, *entry will be updated to point at the relevant
   group_reloc_table entry.  */

static int
find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
{
  unsigned int i;

  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
    {
      int length = strlen (group_reloc_table[i].name);

      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
	  && (*str)[length] == ':')
	{
	  *out = &group_reloc_table[i];
	  *str += (length + 1);
	  return SUCCESS;
	}
    }

  return FAIL;
}
4890 /* Parse a <shifter_operand> for an ARM data processing instruction
4891 (as for parse_shifter_operand) where group relocations are allowed:
4894 #<immediate>, <rotate>
4895 #:<group_reloc>:<expression>
4899 where <group_reloc> is one of the strings defined in group_reloc_table.
4900 The hashes are optional.
4902 Everything else is as for parse_shifter_operand. */
4904 static parse_operand_result
4905 parse_shifter_operand_group_reloc (char **str
, int i
)
4907 /* Determine if we have the sequence of characters #: or just :
4908 coming next. If we do, then we check for a group relocation.
4909 If we don't, punt the whole lot to parse_shifter_operand. */
4911 if (((*str
)[0] == '#' && (*str
)[1] == ':')
4912 || (*str
)[0] == ':')
4914 struct group_reloc_table_entry
*entry
;
4916 if ((*str
)[0] == '#')
4921 /* Try to parse a group relocation. Anything else is an error. */
4922 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
4924 inst
.error
= _("unknown group relocation");
4925 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4928 /* We now have the group relocation table entry corresponding to
4929 the name in the assembler source. Next, we parse the expression. */
4930 if (my_get_expression (&inst
.reloc
.exp
, str
, GE_NO_PREFIX
))
4931 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
4933 /* Record the relocation type (always the ALU variant here). */
4934 inst
.reloc
.type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
4935 gas_assert (inst
.reloc
.type
!= 0);
4937 return PARSE_OPERAND_SUCCESS
;
4940 return parse_shifter_operand (str
, i
) == SUCCESS
4941 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
4943 /* Never reached. */
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

   It is the caller's responsibility to check for addressing modes not
   supported by the instruction, and to set inst.reloc.type.  */
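/* Illustrative examples only (not part of the assembler): field settings
   for a few concrete operands, following the table above.  Register
   numbers are the architectural ones.  */
#if 0
  /* "[r1, #8]"	     .reg=1  .preind=1		      .reloc.exp=8
     "[r1, -r2]"     .reg=1  .preind=1	.negative=1   .imm=2 .immisreg=1
     "[r1], r2"	     .reg=1  .postind=1 .writeback=1  .imm=2 .immisreg=1
     "[r1]!"	     .reg=1  .preind=1	.writeback=1  .reloc.exp=0
     "=0x12345678"   .isreg=0			      .reloc.exp=0x12345678  */
#endif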
4978 static parse_operand_result
4979 parse_address_main (char **str
, int i
, int group_relocations
,
4980 group_reloc_type group_type
)
4985 if (skip_past_char (&p
, '[') == FAIL
)
4987 if (skip_past_char (&p
, '=') == FAIL
)
4989 /* Bare address - translate to PC-relative offset. */
4990 inst
.reloc
.pc_rel
= 1;
4991 inst
.operands
[i
].reg
= REG_PC
;
4992 inst
.operands
[i
].isreg
= 1;
4993 inst
.operands
[i
].preind
= 1;
4995 /* Otherwise a load-constant pseudo op, no special treatment needed here. */
4997 if (my_get_expression (&inst
.reloc
.exp
, &p
, GE_NO_PREFIX
))
4998 return PARSE_OPERAND_FAIL
;
5001 return PARSE_OPERAND_SUCCESS
;
5004 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5006 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5007 return PARSE_OPERAND_FAIL
;
5009 inst
.operands
[i
].reg
= reg
;
5010 inst
.operands
[i
].isreg
= 1;
5012 if (skip_past_comma (&p
) == SUCCESS
)
5014 inst
.operands
[i
].preind
= 1;
5017 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5019 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5021 inst
.operands
[i
].imm
= reg
;
5022 inst
.operands
[i
].immisreg
= 1;
5024 if (skip_past_comma (&p
) == SUCCESS
)
5025 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5026 return PARSE_OPERAND_FAIL
;
5028 else if (skip_past_char (&p
, ':') == SUCCESS
)
5030 /* FIXME: '@' should be used here, but it's filtered out by generic
5031 code before we get to see it here. This may be subject to
	  my_get_expression (&exp, &p, GE_NO_PREFIX);
	  if (exp.X_op != O_constant)
	    {
	      inst.error = _("alignment must be constant");
	      return PARSE_OPERAND_FAIL;
	    }

	  inst.operands[i].imm = exp.X_add_number << 8;
	  inst.operands[i].immisalign = 1;
	  /* Alignments are not pre-indexes.  */
	  inst.operands[i].preind = 0;
	}
      else
	{
	  if (inst.operands[i].negative)
	    inst.operands[i].negative = 0;

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		case GROUP_LDR:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  break;

		case GROUP_LDRS:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  break;

		case GROUP_LDC:
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
		  break;

		default:
		  gas_assert (0);
		}

	      if (inst.reloc.type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      if (inst.operands[i].negative)
		inst.operands[i].negative = 0;
	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
static int
parse_address (char **str, int i)
{
  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
	 ? SUCCESS : FAIL;
}

static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
/* Parse an operand for a MOVW or MOVT instruction.  */
static int
parse_half (char **str)
{
  char *p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
    return FAIL;

  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      if (inst.reloc.exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
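/* Illustrative usage (not from the original source): parse_half handles the
   immediate operand of MOVW/MOVT, so assembly such as

       movw  r0, #:lower16:some_symbol
       movt  r0, #:upper16:some_symbol

   leaves inst.reloc.type set to BFD_RELOC_ARM_MOVW or BFD_RELOC_ARM_MOVT
   respectively, while a plain "movw r0, #0x1234" keeps the type as
   BFD_RELOC_UNUSED and is range-checked against 0..0xffff here.  */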
/* Miscellaneous.  */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.  */
static int
parse_psr (char **str)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
    psr_field = 0;
  else
    {
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
						  p - start);
      if (!psr)
	return FAIL;

      *str = p;
      return psr->field;
    }

  p += 4;
  if (*p == '_')
    {
      /* A suffix follows.  */
      start = ++p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
						  p - start);
      if (!psr)
	goto error;

      psr_field |= psr->field;
    }
  else
    {
      if (ISALNUM (*p))
	goto error;	/* Garbage after "[CS]PSR".  */

      psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
   value suitable for splatting into the AIF field of the instruction.  */
static int
parse_cps_flags (char **str)
{
  int val = 0;
  int saw_a_flag = 0;
  char *s = *str;

  for (;;)
    switch (*s++)
      {
      case '\0': case ',':
	goto done;

      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;

      default:
	inst.error = _("unrecognized CPS flag");
	return FAIL;
      }

 done:
  if (saw_a_flag == 0)
    {
      inst.error = _("missing CPS flags");
      return FAIL;
    }

  *str = s - 1;
  return val;
}
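/* Worked example (illustrative, not from the original source): for
   "cpsid if" the flag string "if" yields val = 0x2 | 0x1 = 0x3, i.e. the
   I and F bits of the AIF field, while "cpsie a" yields 0x4.  A string
   containing none of a/i/f fails with "missing CPS flags".  */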
/* Parse an endian specifier ("BE" or "LE", case insensitive);
   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
static int
parse_endian_specifier (char **str)
{
  int little_endian;
  char *s = *str;

  if (strncasecmp (s, "BE", 2))
    little_endian = 0;
  else if (strncasecmp (s, "LE", 2))
    little_endian = 1;
  else
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  if (ISALNUM (s[2]) || s[2] == '_')
    {
      inst.error = _("valid endian specifiers are be or le");
      return FAIL;
    }

  *str = s + 2;
  return little_endian;
}
/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
   value suitable for poking into the rotate field of an sxt or sxta
   instruction, or FAIL on error.  */
static int
parse_ror (char **str)
{
  int rot;
  char *s = *str;

  if (strncasecmp (s, "ROR", 3) == 0)
    s += 3;
  else
    {
      inst.error = _("missing rotation field after comma");
      return FAIL;
    }

  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
    return FAIL;

  switch (rot)
    {
    case  0: *str = s; return 0x0;
    case  8: *str = s; return 0x1;
    case 16: *str = s; return 0x2;
    case 24: *str = s; return 0x3;

    default:
      inst.error = _("rotation can only be 0, 8, 16, or 24");
      return FAIL;
    }
}
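/* Illustrative usage (not from the original source): "sxtb r0, r1, ror #16"
   reaches this parser with "ROR #16" after the comma; parse_immediate
   accepts 16 and the function returns 0x2, the value later placed in the
   two-bit rotate field of the sxt/sxta encoding.  */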
/* Parse a conditional code (from conds[] below).  The value returned is in the
   range 0 .. 14, or FAIL.  */
static int
parse_cond (char **str)
{
  char *q;
  const struct asm_cond *c;
  int n;
  /* Condition codes are always 2 characters, so matching up to
     3 characters is sufficient.  */
  char cond[3];

  q = *str;
  n = 0;
  while (ISALPHA (*q) && n < 3)
    {
      cond[n] = TOLOWER (*q);
      q++;
      n++;
    }

  c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
  if (!c)
    {
      inst.error = _("condition required");
      return FAIL;
    }

  *str = q;
  return c->value;
}
/* Parse an option for a barrier instruction.  Returns the encoding for the
   option, or FAIL.  */
static int
parse_barrier (char **str)
{
  char *p, *q;
  const struct asm_barrier_opt *o;

  p = q = *str;
  while (ISALPHA (*q))
    q++;

  o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
						     q - p);
  if (!o)
    return FAIL;

  *str = q;
  return o->value;
}
/* Parse the operands of a table branch instruction.  Similar to a memory
   operand.  */
static int
parse_tb (char **str)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      inst.error = _("'[' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].reg = reg;

  if (skip_past_comma (&p) == FAIL)
    {
      inst.error = _("',' expected");
      return FAIL;
    }

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return FAIL;
    }
  inst.operands[0].imm = reg;

  if (skip_past_comma (&p) == SUCCESS)
    {
      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
	return FAIL;
      if (inst.reloc.exp.X_add_number != 1)
	{
	  inst.error = _("invalid shift");
	  return FAIL;
	}
      inst.operands[0].shifted = 1;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return FAIL;
    }
  *str = p;
  return SUCCESS;
}
/* Parse the operands of a Neon VMOV instruction.  See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	   != FAIL)
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      i++;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
					   &optype)) != FAIL)
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	   Case 10: VMOV.F32 <Sd>, #<imm>
	   Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i) == SUCCESS)
	/* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	   Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6 and 7.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      == FAIL)
	    {
	      first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
	      return FAIL;
	    }

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_VFS)
	    {
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
					      &optype)) == FAIL)
		{
		  first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		  return FAIL;
		}
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = 1;
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands.  Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
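/* Illustrative example (not part of the original source): a mixed operand
   such as

     OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp)

   packs the ARM matcher in the low 16 bits and the Thumb matcher in the
   high 16 bits; parse_operands later unpacks it with

     op_parse_code = thumb ? (op_parse_code >> 16)
			   : (op_parse_code & ((1 << 16) - 1));

   so the same operand slot is checked against OP_RRnpc when assembling ARM
   code and against the stricter OP_RRnpcsp (BadReg) when assembling Thumb.  */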
/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_PSR,	/* CPSR/SPSR mask for msr */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_RVC_PSR,	/* CPSR/SPSR mask for msr, or VFP control register.  */
  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			      0 .. 31 */
  OP_oI32b,	/*			      1 .. 32 */
  OP_oIffffb,	/*			      0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER,	/* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.  */

static int
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
{
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;

#define po_char_or_fail(chr)			\
  do						\
    {						\
      if (skip_past_char (&str, chr) == FAIL)	\
	goto bad_args;				\
    }						\
  while (0)

#define po_reg_or_fail(regtype)					\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	{							\
	  first_error (_(reg_expected_msgs[regtype]));		\
	  goto failure;						\
	}							\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

#define po_reg_or_goto(regtype, label)				\
  do								\
    {								\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      if (val == FAIL)						\
	goto label;						\
								\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
				|| rtype == REG_TYPE_VFD	\
				|| rtype == REG_TYPE_NQ);	\
    }								\
  while (0)

#define po_imm_or_fail(min, max, popt)				\
  do								\
    {								\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
	goto failure;						\
      inst.operands[i].imm = val;				\
    }								\
  while (0)

#define po_scalar_or_goto(elsz, label)					\
  do									\
    {									\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      if (val == FAIL)							\
	goto label;							\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\
    }									\
  while (0)

#define po_misc_or_fail(expr)			\
  do						\
    {						\
      if (expr)					\
	goto failure;				\
    }						\
  while (0)

#define po_misc_or_fail_no_backtrack(expr)		\
  do							\
    {							\
      result = expr;					\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\
	goto failure;					\
    }							\
  while (0)

  skip_whitespace (str);

  for (i = 0; upat[i] != OP_stop; i++)
    {
      op_parse_code = upat[i];
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
			      : (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	{
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;
	}

      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      switch (op_parse_code)
	{
	  /* Registers */
	case OP_oRRnpc:
	case OP_oRRnpcsp:
	case OP_RRnpc:
	case OP_RRnpcsp:
	case OP_oRR:
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_oRND:
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RVC:
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  break;
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  coproc_reg:
	  po_reg_or_fail (REG_TYPE_CN);
	  break;
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_oRNQ:
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_oRNDQ:
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_oRNSDQ:
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	  /* Neon scalar.  Using an element size of 8 means that some invalid
	     scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;

	case OP_RNDQ_I0:
	  po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	  break;
	  try_imm0:
	  po_imm_or_fail (0, 0, TRUE);
	  break;

	case OP_RVSD_I0:
	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
	  break;

	case OP_RR_RNSC:
	  po_scalar_or_goto (8, try_rr);
	  break;
	  try_rr:
	  po_reg_or_fail (REG_TYPE_RN);
	  break;

	case OP_RNSDQ_RNSC:
	  po_scalar_or_goto (8, try_nsdq);
	  break;
	  try_nsdq:
	  po_reg_or_fail (REG_TYPE_NSDQ);
	  break;

	case OP_RNDQ_RNSC:
	  po_scalar_or_goto (8, try_ndq);
	  break;
	  try_ndq:
	  po_reg_or_fail (REG_TYPE_NDQ);
	  break;

	case OP_RND_RNSC:
	  po_scalar_or_goto (8, try_vfd);
	  break;
	  try_vfd:
	  po_reg_or_fail (REG_TYPE_VFD);
	  break;

	case OP_VMOV:
	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
	  break;

	case OP_RNDQ_Ibig:
	  po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	  break;
	  try_immbig:
	  /* There's a possibility of getting a 64-bit immediate here, so
	     we need special handling.  */
	  if (parse_big_immediate (&str, i) == FAIL)
	    {
	      inst.error = _("immediate value is out of range");
	      goto failure;
	    }
	  break;

	case OP_RNDQ_I63b:
	  po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	  break;
	  try_shimm:
	  po_imm_or_fail (0, 63, TRUE);
	  break;

	case OP_RRnpcb:
	  po_char_or_fail ('[');
	  po_reg_or_fail  (REG_TYPE_RN);
	  po_char_or_fail (']');
	  break;

	case OP_RRw:
	case OP_oRRw:
	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;
	  break;

	  /* Immediates */
	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_oI7b:
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_oI31b:
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	  /* Immediate variants */
	case OP_oI255c:
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');
	  break;

	case OP_I31w:
	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	  {
	    char *s = str;
	    while (*s && *s != ',')
	      s++;
	    if (s[-1] == '!')
	      {
		s[-1] = '\0';
		inst.operands[i].writeback = 1;
	      }
	    po_imm_or_fail (0, 31, TRUE);
	    if (str == s - 1)
	      str = s;
	  }
	  break;

	  /* Expressions */
	case OP_EXPi:	EXPi:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_OPT_PREFIX));
	  break;

	case OP_EXP:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  break;

	case OP_EXPr:	EXPr:
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
					      GE_NO_PREFIX));
	  if (inst.reloc.exp.X_op == O_symbol)
	    {
	      val = parse_reloc (&str);
	      if (val == -1)
		{
		  inst.error = _("unrecognized relocation suffix");
		  goto failure;
		}
	      else if (val != BFD_RELOC_UNUSED)
		{
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;
		}
	    }
	  break;

	  /* Operand for MOVW or MOVT.  */
	case OP_HALF:
	  po_misc_or_fail (parse_half (&str));
	  break;

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:	  po_reg_or_goto (REG_TYPE_FN, IF);   break;
	IF:
	  if (!is_immediate_prefix (*str))
	    goto bad_args;
	  str++;
	  val = parse_fpa_immediate (&str);
	  if (val == FAIL)
	    goto failure;
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  break;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		   po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	case OP_RIWR_RIWC:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt data or control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
	  }
	  break;

	case OP_RIWC_RIWG:
	  {
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
	    if (!rege
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
	      {
		inst.error = _("iWMMXt control register expected");
		goto failure;
	      }
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;
	  }
	  break;

	  /* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_PSR:	 val = parse_psr (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER:val = parse_barrier (&str);		break;

	case OP_RVC_PSR:
	  po_reg_or_goto (REG_TYPE_VFC, try_psr);
	  inst.operands[i].isvec = 1;  /* Mark VFP control reg as vector.  */
	  break;
	  try_psr:
	  val = parse_psr (&str);
	  break;

	case OP_APSR_RR:
	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  break;
	  try_apsr:
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
	     instruction).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
	    {
	      unsigned found = 0;
	      str += 5;
	      while (found < 15)
		switch (*str++)
		  {
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
		  }
	      if (found != 15)
		goto failure;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;
	    }
	  else
	    goto failure;
	  break;

	case OP_TB:
	  po_misc_or_fail (parse_tb (&str));
	  break;

	  /* Register lists.  */
	case OP_REGLST:
	  val = parse_reg_list (&str);
	  if (*str == '^')
	    {
	      inst.operands[1].writeback = 1;
	      str++;
	    }
	  break;

	case OP_VRSLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
	  break;

	case OP_VRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
	  break;

	case OP_VRSDLST:
	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  if (val == FAIL)
	    {
	      inst.error = NULL;
	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
					REGLIST_VFP_S);
	      inst.operands[i].issingle = 1;
	    }
	  break;

	case OP_NRDLST:
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
				    REGLIST_NEON_D);
	  break;

	case OP_NSTRLST:
	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);
	  break;

	  /* Addressing modes */
	case OP_ADDR:
	  po_misc_or_fail (parse_address (&str, i));
	  break;

	case OP_ADDRGLDR:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));
	  break;

	case OP_ADDRGLDRS:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));
	  break;

	case OP_ADDRGLDC:
	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));
	  break;

	case OP_SH:
	  po_misc_or_fail (parse_shifter_operand (&str, i));
	  break;

	case OP_SHG:
	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));
	  break;

	case OP_oSHll:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
	  break;

	case OP_oSHar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
	  break;

	case OP_oSHllar:
	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
	  break;

	default:
	  as_fatal (_("unhandled operand code %d"), op_parse_code);
	}

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	{
	case OP_oRRnpc:
	case OP_RRnpc:
	case OP_RRnpcb:
	case OP_RRw:
	case OP_oRRw:
	case OP_RRnpc_I0:
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;
	  break;

	case OP_oRRnpcsp:
	case OP_RRnpcsp:
	  if (inst.operands[i].isreg)
	    {
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP)
		inst.error = BAD_SP;
	    }
	  break;

	case OP_CPSF:
	case OP_ENDI:
	case OP_oROR:
	case OP_PSR:
	case OP_RVC_PSR:
	case OP_COND:
	case OP_oBARRIER:
	case OP_REGLST:
	case OP_VRSLST:
	case OP_VRDLST:
	case OP_VRSDLST:
	case OP_NRDLST:
	case OP_NSTRLST:
	  if (val == FAIL)
	    goto failure;
	  inst.operands[i].imm = val;
	  break;

	default:
	  break;
	}

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;
      continue;

    bad_args:
      inst.error = BAD_ARGS;

    failure:
      if (!backtrack_pos)
	{
	  /* The parse routine should already have set inst.error, but set a
	     default here just in case.  */
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Do not backtrack over a trailing optional argument that
	 absorbed some text.  We will only fail again, with the
	 'garbage following instruction' error message, which is
	 probably less helpful than the current one.  */
      if (backtrack_index == i && backtrack_pos != str
	  && upat[i+1] == OP_stop)
	{
	  if (!inst.error)
	    inst.error = _("syntax error");
	  return FAIL;
	}

      /* Try again, skipping the optional argument at backtrack_pos.  */
      str = backtrack_pos;
      inst.error = backtrack_error;
      inst.operands[backtrack_index].present = 0;
      i = backtrack_index;
      backtrack_pos = 0;
    }

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
}

#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_fail
/* Shorthand macro for instruction encoding functions issuing errors.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.  */
#define reject_bad_reg(reg)				\
  do							\
    {							\
      if (reg == REG_SP || reg == REG_PC)		\
	{						\
	  inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
	  return;					\
	}						\
    }							\
  while (0)

/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  */
#define warn_deprecated_sp(reg)			\
  do						\
    {						\
      if (warn_on_deprecated && reg == REG_SP)	\
	as_warn (_("use of r13 is deprecated"));	\
    }						\
  while (0)
/* Functions for operand encoding.  ARM, then Thumb.  */

#define rotate_left(v, n) (v << n | v >> (32 - n))

/* If VAL can be encoded in the immediate field of an ARM instruction,
   return the encoded form.  Otherwise, return FAIL.  */

static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int a, i;

  for (i = 0; i < 32; i += 2)
    if ((a = rotate_left (val, i)) <= 0xff)
      return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */

  return FAIL;
}
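/* Worked example (illustrative, not from the original source): the ARM
   data-processing immediate is an 8-bit constant rotated right by an even
   amount.  For val = 0xff000000, rotating left by 8 gives 0xff <= 0xff, so
   the function returns 0xff | (8 << 7) == 0x4ff, i.e. rotate field 4 with
   constant 0xff.  A value such as 0x101 has no such encoding and yields
   FAIL.  */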
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */
static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  if (val <= 0xff)
    return val;

  for (i = 1; i <= 24; i++)
    if ((val & ~(0xff << i)) == 0)
      return ((val >> i) & 0x7f) | ((32 - i) << 7);

  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
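/* Worked example (illustrative, not from the original source): Thumb-2
   modified immediates also accept byte-replication patterns.  With the
   checks above, val = 0x00ab00ab matches the (a << 16) | a test with
   a = 0xab and encodes as 0x100 | 0xab, while val = 0xab00ab00 matches the
   high-byte replication test and encodes as 0x200 | 0xab; a shifted-byte
   value such as 0x00012000 is handled by the loop instead.  */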
/* Encode a VFP SP or DP register number into inst.instruction.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
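/* Worked example (illustrative, not from the original source): a VFPv3
   D register such as d17 (reg == 17) in the Dd position emits
   ((17 & 15) << 12) | ((17 >> 4) << 22), i.e. Vd = 1 in bits 15:12 with
   the D extension bit set at bit 22; s5 (reg == 5) in the Sd position
   emits ((5 >> 1) << 12) | ((5 & 1) << 22).  */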
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.  */
static void
encode_arm_shift (int i)
{
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}

static void
encode_arm_shifter_operand (int i)
{
  if (inst.operands[i].isreg)
    {
      inst.instruction |= inst.operands[i].reg;
      encode_arm_shift (i);
    }
  else
    inst.instruction |= INST_IMMEDIATE;
}
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  gas_assert (inst.operands[i].isreg);
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;
    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.  If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.reloc */
    {
      if (is_pc && !inst.reloc.pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* BAD_PC_ADDRESSING Condition =
	       is_load => is_t
	     which becomes !is_load || is_t.  */
	  constraint ((!is_load || is_t),
		      BAD_PC_ADDRESSING);
	}

      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.  Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      constraint ((inst.operands[i].imm == REG_PC
		   || inst.operands[i].reg == REG_PC),
		  BAD_PC_ADDRESSING);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.reloc */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      inst.instruction |= HWOFFSET_IMM;
      if (inst.reloc.type == BFD_RELOC_UNUSED)
	inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
    }
}
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format instruction.  Reject all forms which cannot be encoded
   into a coprocessor load/store instruction.  If wb_ok is false,
   reject use of writeback; if unind_ok is false, reject use of
   unindexed addressing.  If reloc_override is not 0, use it instead
   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
   (in which case it is preserved).  */

static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  inst.instruction |= inst.operands[i].reg << 16;

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  if (reloc_override)
    inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  return SUCCESS;
}
/* inst.reloc.exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return TRUE; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.  */

static bfd_boolean
move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
{
  unsigned long tbit;

  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }
  if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }
  if (inst.reloc.exp.X_op == O_constant)
    {
      if (thumb_p)
	{
	  if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
	    {
	      /* This can be done with a mov(1) instruction.  */
	      inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
	      inst.instruction |= inst.reloc.exp.X_add_number;
	      return TRUE;
	    }
	}
      else
	{
	  int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mov instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }

	  value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
	  if (value != FAIL)
	    {
	      /* This can be done with a mvn instruction.  */
	      inst.instruction &= LITERAL_MASK;
	      inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
	      inst.instruction |= value & 0xfff;
	      return TRUE;
	    }
	}
    }

  if (add_to_lit_pool () == FAIL)
    {
      inst.error = _("literal pool insertion failed");
      return TRUE;
    }
  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.reloc.pc_rel = 1;
  inst.reloc.type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
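/* Illustrative usage (not from the original source): for the pseudo
   instruction

       ldr  r0, =0x000000ff

   the constant is representable as an ARM immediate, so the load is
   rewritten as "mov r0, #0xff" and no literal pool entry is made; for

       ldr  r0, =0x12345678

   neither MOV nor MVN can encode the value, so the word is placed in the
   literal pool and the instruction becomes a PC-relative load of it.  */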
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.  */

static void
do_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
}

static void
do_rd_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}

static void
do_rd_rn (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}

static void
do_rn_rd (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}

static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
      if (warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
	as_warn (_("swp{b} use is deprecated for this architecture"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}

static void
do_rd_rn_rm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
}

static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  constraint (((inst.reloc.exp.X_op != O_constant
		&& inst.reloc.exp.X_op != O_illegal)
	       || inst.reloc.exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}

static void
do_imm0 (void)
{
  inst.instruction |= inst.operands[0].imm;
}

static void
do_rd_cpaddr (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
/* ARM instructions, in alphabetical order by function name (except
   that wrapper functions appear immediately after the function they
   call).  */

/* This is a pseudo-op of the form "adr rd, label" to be converted
   into a relative address of the form "add rd, pc, #label-.-8".  */

static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 1;
  inst.reloc.exp.X_add_number -= 8;
}

/* This is a pseudo-op of the form "adrl rd, label" to be converted
   into a relative address of the form:
   add rd, pc, #low(label-.-8)"
   add rd, rd, #high(label-.-8)"  */

static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.reloc.type	      = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.reloc.pc_rel	      = 1;
  inst.size		      = INSN_SIZE * 2;
  inst.reloc.exp.X_add_number -= 8;
}
static void
do_arit (void)
{
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}

static void
do_barrier (void)
{
  if (inst.operands[0].present)
    {
      constraint ((inst.instruction & 0xf0) != 0x40
		  && inst.operands[0].imm != 0xf,
		  _("bad barrier type"));
      inst.instruction |= inst.operands[0].imm;
    }
  else
    inst.instruction |= 0xf;
}
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}

static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}

static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
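/* Worked example (illustrative, not from the original source): for
   "bfi r0, r1, #8, #4" the parser supplies lsb = 8 and width = 4, so
   msb = 12; the encoding above stores the lsb (8) at bits 11:7 and
   msb - 1 (11) at bits 20:16, matching the LSB/MSB form used by the
   hardware rather than the LSB/width form used in the mnemonic.  */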
/* ARM V5 breakpoint instruction (argument parse)
     BKPT <16 bit unsigned immediate>
     Instruction is not conditional.
	The bit pattern given in insns[] has the COND_ALWAYS condition,
	and it is an error if the caller tried to override that.  */

static void
do_bkpt (void)
{
  /* Top 12 of 16 bits to bits 19:8.  */
  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;

  /* Bottom 4 of 16 bits to bits 3:0.  */
  inst.instruction |= inst.operands[0].imm & 0xf;
}
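/* Worked example (illustrative, not from the original source): for
   "bkpt 0xabcd" the code above places 0xabc in bits 19:8 and 0xd in
   bits 3:0, giving the split immediate mandated by the BKPT encoding.  */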
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
		  _("the only suffix valid here is '(plt)'"));
      inst.reloc.type = BFD_RELOC_ARM_PLT32;
    }
  else
    inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
  inst.reloc.pc_rel = 1;
}

static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}

static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
/* ARM V5 branch-link-exchange instruction (argument parse)
     BLX <target_addr>		ie BLX(1)
     BLX{<condition>} <Rm>	ie BLX(2)
   Unfortunately, there are two different opcodes for this mnemonic.
   So, the insns[].value is not used, and the code here zaps values
	into inst.instruction.
   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */

static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}

static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
    want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.reloc.type = BFD_RELOC_ARM_V4BX;
}

/* ARM v5TEJ.  Jump to Jazelle code.  */

static void
do_bxj (void)
{
  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bxj is not really useful"));

  inst.instruction |= inst.operands[0].reg;
}
/* Co-processor data operation:
      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}  */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 20;
  inst.instruction |= inst.operands[2].reg << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}

static void
do_cmp (void)
{
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_shifter_operand (1);
}
/* Transfer between coprocessor and ARM registers.
      MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}

   No special properties.  */

static void
do_co_reg (void)
{
  unsigned Rd;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
/* Transfer between coprocessor register and pair of ARM registers.
      MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
static void
do_cpsi (void)
{
  inst.instruction |= inst.operands[0].imm << 6;
  if (inst.operands[1].present)
    {
      inst.instruction |= CPSI_MMOD;
      inst.instruction |= inst.operands[1].imm;
    }
}

static void
do_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}

static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */
  if (unified_syntax)
    {
      set_it_insn_type (IT_INSN);
      now_it.mask = (inst.instruction & 0xf) | 0x10;
      now_it.cc = inst.operands[0].imm;
    }
}
static void
do_ldmstm (void)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.  */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }
}
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.  */

static void
do_ldrd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first destination register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  if (inst.instruction & LOAD_BIT)
    {
      /* encode_arm_addr_mode_3 will diagnose overlap between the base
	 register and the first register written; we have to diagnose
	 overlap between the base and the second register written here.  */

      if (inst.operands[2].reg == inst.operands[1].reg
	  && (inst.operands[2].writeback || inst.operands[2].postind))
	as_warn (_("base register written back, and overlaps "
		   "second destination register"));

      /* For an index-register load, the index register must not overlap the
	 destination (even if not write-back).  */
      else if (inst.operands[2].immisreg
	       && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
		   || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps destination register"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
}
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
static void
do_ldst (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
}

static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}

static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      constraint (inst.reloc.exp.X_op != O_constant
		  || inst.reloc.exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
/* Co-processor register load/store.
   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
static void
do_lstc (void)
{
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
7599 /* This restriction does not apply to mls (nor to mla in v6 or later). */
7600 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7601 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
7602 && !(inst
.instruction
& 0x00400000))
7603 as_tsktsk (_("Rd and Rm should be different in mla"));
7605 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7606 inst
.instruction
|= inst
.operands
[1].reg
;
7607 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7608 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
7614 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7615 encode_arm_shifter_operand (1);
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.  */

  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed this instruction"));
  constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
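/* Illustrative example (not in the original source): assuming imm = 0xABCD,
   the two statements above place 0xBCD in bits 0-11 and 0xA in bits 16-19 of
   the opcode, which is how MOVW/MOVT split a 16-bit immediate across the ARM
   instruction word.  */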
7640 static void do_vfp_nsyn_opcode (const char *);
7643 do_vfp_nsyn_mrs (void)
7645 if (inst
.operands
[0].isvec
)
7647 if (inst
.operands
[1].reg
!= 1)
7648 first_error (_("operand 1 must be FPSCR"));
7649 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
7650 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
7651 do_vfp_nsyn_opcode ("fmstat");
7653 else if (inst
.operands
[1].isvec
)
7654 do_vfp_nsyn_opcode ("fmrx");
7662 do_vfp_nsyn_msr (void)
7664 if (inst
.operands
[0].isvec
)
7665 do_vfp_nsyn_opcode ("fmxr");
  unsigned Rt = inst.operands[0].reg;

  if (thumb_mode && inst.operands[0].reg == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  /* APSR_ sets isvec.  All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  if (inst.operands[1].reg != 1)
    first_error (_("operand 1 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}

static void
do_vmsr (void)
{
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  if (inst.operands[0].reg != 1)
    first_error (_("operand 0 must be FPSCR"));

  inst.instruction |= (Rt << 12);
}
7718 if (do_vfp_nsyn_mrs () == SUCCESS
)
7721 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7722 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
7724 _("'CPSR' or 'SPSR' expected"));
7725 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
7726 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7727 inst
.instruction
|= (inst
.operands
[1].imm
& SPSR_BIT
);
7730 /* Two possible forms:
7731 "{C|S}PSR_<field>, Rm",
7732 "{C|S}PSR_f, #expression". */
7737 if (do_vfp_nsyn_msr () == SUCCESS
)
7740 inst
.instruction
|= inst
.operands
[0].imm
;
7741 if (inst
.operands
[1].isreg
)
7742 inst
.instruction
|= inst
.operands
[1].reg
;
7745 inst
.instruction
|= INST_IMMEDIATE
;
7746 inst
.reloc
.type
= BFD_RELOC_ARM_IMMEDIATE
;
7747 inst
.reloc
.pc_rel
= 0;
7754 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
7756 if (!inst
.operands
[2].present
)
7757 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
7758 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7759 inst
.instruction
|= inst
.operands
[1].reg
;
7760 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7762 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
7763 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
7764 as_tsktsk (_("Rd and Rm should be different in mul"));
/* Long Multiply Parser
   UMULL RdLo, RdHi, Rm, Rs
   SMULL RdLo, RdHi, Rm, Rs
   UMLAL RdLo, RdHi, Rm, Rs
   SMLAL RdLo, RdHi, Rm, Rs.  */

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
       || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
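/* Illustrative note (not in the original source): with the operand order
   RdLo, RdHi, Rm, Rs used above, RdLo lands in bits 12-15, RdHi in
   bits 16-19, Rm in bits 0-3 and Rs in bits 8-11 of the long-multiply
   opcode.  */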
7795 if (inst
.operands
[0].present
7796 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
7798 /* Architectural NOP hints are CPSR sets with no bits selected. */
7799 inst
.instruction
&= 0xf0000000;
7800 inst
.instruction
|= 0x0320f000;
7801 if (inst
.operands
[0].present
)
7802 inst
.instruction
|= inst
.operands
[0].imm
;
7806 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7807 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7808 Condition defaults to COND_ALWAYS.
7809 Error if Rd, Rn or Rm are R15. */
7814 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7815 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7816 inst
.instruction
|= inst
.operands
[2].reg
;
7817 if (inst
.operands
[3].present
)
7818 encode_arm_shift (3);
7821 /* ARM V6 PKHTB (Argument Parse). */
7826 if (!inst
.operands
[3].present
)
7828 /* If the shift specifier is omitted, turn the instruction
7829 into pkhbt rd, rm, rn. */
7830 inst
.instruction
&= 0xfff00010;
7831 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7832 inst
.instruction
|= inst
.operands
[1].reg
;
7833 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
7837 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7838 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
7839 inst
.instruction
|= inst
.operands
[2].reg
;
7840 encode_arm_shift (3);
/* ARMv5TE: Preload-Cache
   Syntactically, like LDR with B=1, W=0, L=1.  */

  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7864 /* ARMv7: PLI <addr_mode> */
7868 constraint (!inst
.operands
[0].isreg
,
7869 _("'[' expected after PLI mnemonic"));
7870 constraint (inst
.operands
[0].postind
,
7871 _("post-indexed expression used in preload instruction"));
7872 constraint (inst
.operands
[0].writeback
,
7873 _("writeback used in preload instruction"));
7874 constraint (!inst
.operands
[0].preind
,
7875 _("unindexed addressing used in preload instruction"));
7876 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
7877 inst
.instruction
&= ~PRE_INDEX
;
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
7891 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7892 word at the specified address and the following word
7894 Unconditionally executed.
7895 Error if Rn is R15. */
7900 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
7901 if (inst
.operands
[0].writeback
)
7902 inst
.instruction
|= WRITE_BACK
;
7905 /* ARM V6 ssat (argument parse). */
7910 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7911 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
7912 inst
.instruction
|= inst
.operands
[2].reg
;
7914 if (inst
.operands
[3].present
)
7915 encode_arm_shift (3);
7918 /* ARM V6 usat (argument parse). */
7923 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7924 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7925 inst
.instruction
|= inst
.operands
[2].reg
;
7927 if (inst
.operands
[3].present
)
7928 encode_arm_shift (3);
7931 /* ARM V6 ssat16 (argument parse). */
7936 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7937 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
7938 inst
.instruction
|= inst
.operands
[2].reg
;
7944 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7945 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
7946 inst
.instruction
|= inst
.operands
[2].reg
;
7949 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7950 preserving the other bits.
7952 setend <endian_specifier>, where <endian_specifier> is either
7958 if (inst
.operands
[0].imm
)
7959 inst
.instruction
|= 0x200;
7965 unsigned int Rm
= (inst
.operands
[1].present
7966 ? inst
.operands
[1].reg
7967 : inst
.operands
[0].reg
);
7969 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
7970 inst
.instruction
|= Rm
;
7971 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
7973 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
7974 inst
.instruction
|= SHIFT_BY_REG
;
7977 inst
.reloc
.type
= BFD_RELOC_ARM_SHIFT_IMM
;
7983 inst
.reloc
.type
= BFD_RELOC_ARM_SMC
;
7984 inst
.reloc
.pc_rel
= 0;
7990 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
7991 inst
.reloc
.pc_rel
= 0;
7994 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7995 SMLAxy{cond} Rd,Rm,Rs,Rn
7996 SMLAWy{cond} Rd,Rm,Rs,Rn
7997 Error if any register is R15. */
8002 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8003 inst
.instruction
|= inst
.operands
[1].reg
;
8004 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8005 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
8008 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8009 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8010 Error if any register is R15.
8011 Warning if Rdlo == Rdhi. */
8016 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8017 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8018 inst
.instruction
|= inst
.operands
[2].reg
;
8019 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
8021 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
8022 as_tsktsk (_("rdhi and rdlo must be different"));
8025 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8026 SMULxy{cond} Rd,Rm,Rs
8027 Error if any register is R15. */
8032 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8033 inst
.instruction
|= inst
.operands
[1].reg
;
8034 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
8037 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8038 the same for both ARM and Thumb-2. */
8045 if (inst
.operands
[0].present
)
8047 reg
= inst
.operands
[0].reg
;
8048 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
8053 inst
.instruction
|= reg
<< 16;
8054 inst
.instruction
|= inst
.operands
[1].imm
;
8055 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
8056 inst
.instruction
|= WRITE_BACK
;
/* ARM V6 strex (argument parse).  */

  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.reloc.exp.X_op != O_constant
	      || inst.reloc.exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.reloc.type = BFD_RELOC_UNUSED;
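/* Illustrative note (not in the original source): for "strex r0, r1, [r2]"
   the three statements above place the status register (r0) in bits 12-15,
   the source register (r1) in bits 0-3 and the base register (r2) in
   bits 16-19 of the opcode.  */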
8088 constraint (inst
.operands
[1].reg
% 2 != 0,
8089 _("even register required"));
8090 constraint (inst
.operands
[2].present
8091 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
8092 _("can only store two consecutive registers"));
8093 /* If op 2 were present and equal to PC, this function wouldn't
8094 have been called in the first place. */
8095 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
8097 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
8098 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
8099 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
8102 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8103 inst
.instruction
|= inst
.operands
[1].reg
;
8104 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8107 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8108 extends it to 32-bits, and adds the result to a value in another
8109 register. You can specify a rotation by 0, 8, 16, or 24 bits
8110 before extracting the 16-bit value.
8111 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8112 Condition defaults to COND_ALWAYS.
8113 Error if any register uses R15. */
8118 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8119 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8120 inst
.instruction
|= inst
.operands
[2].reg
;
8121 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
8126 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8127 Condition defaults to COND_ALWAYS.
8128 Error if any register uses R15. */
8133 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8134 inst
.instruction
|= inst
.operands
[1].reg
;
8135 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
8138 /* VFP instructions. In a logical order: SP variant first, monad
8139 before dyad, arithmetic then move then load/store. */
8142 do_vfp_sp_monadic (void)
8144 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8145 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8149 do_vfp_sp_dyadic (void)
8151 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8152 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8153 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8157 do_vfp_sp_compare_z (void)
8159 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8163 do_vfp_dp_sp_cvt (void)
8165 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8166 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
8170 do_vfp_sp_dp_cvt (void)
8172 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8173 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8177 do_vfp_reg_from_sp (void)
8179 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8180 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
8184 do_vfp_reg2_from_sp2 (void)
8186 constraint (inst
.operands
[2].imm
!= 2,
8187 _("only two consecutive VFP SP registers allowed here"));
8188 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8189 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8190 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
8194 do_vfp_sp_from_reg (void)
8196 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
8197 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8201 do_vfp_sp2_from_reg2 (void)
8203 constraint (inst
.operands
[0].imm
!= 2,
8204 _("only two consecutive VFP SP registers allowed here"));
8205 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
8206 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8207 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8211 do_vfp_sp_ldst (void)
8213 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8214 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8218 do_vfp_dp_ldst (void)
8220 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8221 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
8226 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
8228 if (inst
.operands
[0].writeback
)
8229 inst
.instruction
|= WRITE_BACK
;
8231 constraint (ldstm_type
!= VFP_LDSTMIA
,
8232 _("this addressing mode requires base-register writeback"));
8233 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8234 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
8235 inst
.instruction
|= inst
.operands
[1].imm
;
8239 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
8243 if (inst
.operands
[0].writeback
)
8244 inst
.instruction
|= WRITE_BACK
;
8246 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
8247 _("this addressing mode requires base-register writeback"));
8249 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8250 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8252 count
= inst
.operands
[1].imm
<< 1;
8253 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
8256 inst
.instruction
|= count
;
8260 do_vfp_sp_ldstmia (void)
8262 vfp_sp_ldstm (VFP_LDSTMIA
);
8266 do_vfp_sp_ldstmdb (void)
8268 vfp_sp_ldstm (VFP_LDSTMDB
);
8272 do_vfp_dp_ldstmia (void)
8274 vfp_dp_ldstm (VFP_LDSTMIA
);
8278 do_vfp_dp_ldstmdb (void)
8280 vfp_dp_ldstm (VFP_LDSTMDB
);
8284 do_vfp_xp_ldstmia (void)
8286 vfp_dp_ldstm (VFP_LDSTMIAX
);
8290 do_vfp_xp_ldstmdb (void)
8292 vfp_dp_ldstm (VFP_LDSTMDBX
);
8296 do_vfp_dp_rd_rm (void)
8298 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8299 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
8303 do_vfp_dp_rn_rd (void)
8305 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
8306 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8310 do_vfp_dp_rd_rn (void)
8312 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8313 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8317 do_vfp_dp_rd_rn_rm (void)
8319 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8320 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
8321 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
8327 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8331 do_vfp_dp_rm_rd_rn (void)
8333 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
8334 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
8335 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
8338 /* VFPv3 instructions. */
8340 do_vfp_sp_const (void)
8342 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8343 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8344 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8348 do_vfp_dp_const (void)
8350 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8351 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
8352 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
8356 vfp_conv (int srcsize
)
8358 unsigned immbits
= srcsize
- inst
.operands
[1].imm
;
8359 inst
.instruction
|= (immbits
& 1) << 5;
8360 inst
.instruction
|= (immbits
>> 1);
8364 do_vfp_sp_conv_16 (void)
8366 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8371 do_vfp_dp_conv_16 (void)
8373 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8378 do_vfp_sp_conv_32 (void)
8380 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
8385 do_vfp_dp_conv_32 (void)
8387 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
8391 /* FPA instructions. Also in a logical order. */
8396 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8397 inst
.instruction
|= inst
.operands
[1].reg
;
8401 do_fpa_ldmstm (void)
8403 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8404 switch (inst
.operands
[1].imm
)
8406 case 1: inst
.instruction
|= CP_T_X
; break;
8407 case 2: inst
.instruction
|= CP_T_Y
; break;
8408 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
8413 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
8415 /* The instruction specified "ea" or "fd", so we can only accept
8416 [Rn]{!}. The instruction does not really support stacking or
8417 unstacking, so we have to emulate these by setting appropriate
8418 bits and offsets. */
8419 constraint (inst
.reloc
.exp
.X_op
!= O_constant
8420 || inst
.reloc
.exp
.X_add_number
!= 0,
8421 _("this instruction does not support indexing"));
8423 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
8424 inst
.reloc
.exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
8426 if (!(inst
.instruction
& INDEX_UP
))
8427 inst
.reloc
.exp
.X_add_number
= -inst
.reloc
.exp
.X_add_number
;
8429 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
8431 inst
.operands
[2].preind
= 0;
8432 inst
.operands
[2].postind
= 1;
8436 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
8439 /* iWMMXt instructions: strictly in alphabetical order. */
8442 do_iwmmxt_tandorc (void)
8444 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
8448 do_iwmmxt_textrc (void)
8450 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8451 inst
.instruction
|= inst
.operands
[1].imm
;
8455 do_iwmmxt_textrm (void)
8457 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8458 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8459 inst
.instruction
|= inst
.operands
[2].imm
;
8463 do_iwmmxt_tinsr (void)
8465 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8466 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8467 inst
.instruction
|= inst
.operands
[2].imm
;
8471 do_iwmmxt_tmia (void)
8473 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8474 inst
.instruction
|= inst
.operands
[1].reg
;
8475 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8479 do_iwmmxt_waligni (void)
8481 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8482 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8483 inst
.instruction
|= inst
.operands
[2].reg
;
8484 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
8488 do_iwmmxt_wmerge (void)
8490 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8491 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8492 inst
.instruction
|= inst
.operands
[2].reg
;
8493 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
8497 do_iwmmxt_wmov (void)
8499 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
8500 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8501 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8502 inst
.instruction
|= inst
.operands
[1].reg
;
8506 do_iwmmxt_wldstbh (void)
8509 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8511 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
8513 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
8514 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
8518 do_iwmmxt_wldstw (void)
8520 /* RIWR_RIWC clears .isreg for a control register. */
8521 if (!inst
.operands
[0].isreg
)
8523 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8524 inst
.instruction
|= 0xf0000000;
8527 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8528 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8532 do_iwmmxt_wldstd (void)
8534 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8535 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
8536 && inst
.operands
[1].immisreg
)
8538 inst
.instruction
&= ~0x1a000ff;
8539 inst
.instruction
|= (0xf << 28);
8540 if (inst
.operands
[1].preind
)
8541 inst
.instruction
|= PRE_INDEX
;
8542 if (!inst
.operands
[1].negative
)
8543 inst
.instruction
|= INDEX_UP
;
8544 if (inst
.operands
[1].writeback
)
8545 inst
.instruction
|= WRITE_BACK
;
8546 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8547 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8548 inst
.instruction
|= inst
.operands
[1].imm
;
8551 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
8555 do_iwmmxt_wshufh (void)
8557 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8558 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8559 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
8560 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
8564 do_iwmmxt_wzero (void)
8566 /* WZERO reg is an alias for WANDN reg, reg, reg. */
8567 inst
.instruction
|= inst
.operands
[0].reg
;
8568 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8569 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8573 do_iwmmxt_wrwrwr_or_imm5 (void)
8575 if (inst
.operands
[2].isreg
)
8578 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
8579 _("immediate operand requires iWMMXt2"));
8581 if (inst
.operands
[2].imm
== 0)
8583 switch ((inst
.instruction
>> 20) & 0xf)
8589 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
8590 inst
.operands
[2].imm
= 16;
8591 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
8597 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
8598 inst
.operands
[2].imm
= 32;
8599 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
8606 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
8608 wrn
= (inst
.instruction
>> 16) & 0xf;
8609 inst
.instruction
&= 0xff0fff0f;
8610 inst
.instruction
|= wrn
;
8611 /* Bail out here; the instruction is now assembled. */
8616 /* Map 32 -> 0, etc. */
8617 inst
.operands
[2].imm
&= 0x1f;
8618 inst
.instruction
|= (0xf << 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
8622 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8623 operations first, then control, shift, and load/store. */
8625 /* Insns like "foo X,Y,Z". */
8628 do_mav_triple (void)
8630 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8631 inst
.instruction
|= inst
.operands
[1].reg
;
8632 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8635 /* Insns like "foo W,X,Y,Z".
8636 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8641 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
8642 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8643 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8644 inst
.instruction
|= inst
.operands
[3].reg
;
8647 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8651 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
/* Maverick shift immediate instructions.
   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */

  int imm = inst.operands[2].imm;

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.  */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
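/* Illustrative example (not in the original source): for imm = 0x35
   (binary 011 0101) the repacking above yields (0x5) | (0x30 << 1) = 0x65,
   i.e. bits 0-3 = 0101, bit 4 = 0 and bits 5-7 = 011, exactly as described
   in the comment before it.  */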
8674 /* XScale instructions. Also sorted arithmetic before move. */
8676 /* Xscale multiply-accumulate (argument parse)
8679 MIAxycc acc0,Rm,Rs. */
8684 inst
.instruction
|= inst
.operands
[1].reg
;
8685 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8688 /* Xscale move-accumulator-register (argument parse)
8690 MARcc acc0,RdLo,RdHi. */
8695 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8696 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8699 /* Xscale move-register-accumulator (argument parse)
8701 MRAcc RdLo,RdHi,acc0. */
8706 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
8707 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8708 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
/* Encoding functions relevant only to Thumb.  */

/* inst.operands[i] is a shifted-register operand; encode
   it into inst.instruction in the format used by Thumb32.  */

static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.reloc.exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.reloc.exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
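/* Illustrative example (not in the original source): for a shifted operand
   such as "r3, lsl #3" the code above leaves the type field (bits 4-5) as
   LSL and splits the amount 3 into imm3 = (3 & 0x1c) << 10 = 0 and
   imm2 = (3 & 0x03) << 6 = 0xc0, so only bits 6-7 of the opcode are set.  */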
8749 /* inst.operands[i] was set up by parse_address. Encode it into a
8750 Thumb32 format load or store instruction. Reject forms that cannot
8751 be used with such instructions. If is_t is true, reject forms that
8752 cannot be used with a T instruction; if is_d is true, reject forms
8753 that cannot be used with a D instruction. If it is a store insn,
8757 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
8759 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8761 constraint (!inst
.operands
[i
].isreg
,
8762 _("Instruction does not support =N addresses"));
8764 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8765 if (inst
.operands
[i
].immisreg
)
8767 constraint (is_pc
, BAD_PC_ADDRESSING
);
8768 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
8769 constraint (inst
.operands
[i
].negative
,
8770 _("Thumb does not support negative register indexing"));
8771 constraint (inst
.operands
[i
].postind
,
8772 _("Thumb does not support register post-indexing"));
8773 constraint (inst
.operands
[i
].writeback
,
8774 _("Thumb does not support register indexing with writeback"));
8775 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
8776 _("Thumb supports only LSL in shifted register indexing"));
8778 inst
.instruction
|= inst
.operands
[i
].imm
;
8779 if (inst
.operands
[i
].shifted
)
8781 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
8782 _("expression too complex"));
8783 constraint (inst
.reloc
.exp
.X_add_number
< 0
8784 || inst
.reloc
.exp
.X_add_number
> 3,
8785 _("shift out of range"));
8786 inst
.instruction
|= inst
.reloc
.exp
.X_add_number
<< 4;
8788 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
8790 else if (inst
.operands
[i
].preind
)
8792 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
8793 constraint (is_t
&& inst
.operands
[i
].writeback
,
8794 _("cannot use writeback with this instruction"));
8795 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0)
8796 && !inst
.reloc
.pc_rel
, BAD_PC_ADDRESSING
);
8800 inst
.instruction
|= 0x01000000;
8801 if (inst
.operands
[i
].writeback
)
8802 inst
.instruction
|= 0x00200000;
8806 inst
.instruction
|= 0x00000c00;
8807 if (inst
.operands
[i
].writeback
)
8808 inst
.instruction
|= 0x00000100;
8810 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8812 else if (inst
.operands
[i
].postind
)
8814 gas_assert (inst
.operands
[i
].writeback
);
8815 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
8816 constraint (is_t
, _("cannot use post-indexing with this instruction"));
8819 inst
.instruction
|= 0x00200000;
8821 inst
.instruction
|= 0x00000900;
8822 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
8824 else /* unindexed - only for coprocessor */
8825 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_sev,   bf40, f3af8004),

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
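/* Illustrative note (not in the original source): with these definitions,
   THUMB_OP16 (T_MNEM_add) yields 0x1c00 and THUMB_OP32 (T_MNEM_add) yields
   0xeb000000, straight from the _add row of T16_32_TAB, while
   THUMB_SETS_FLAGS (T_MNEM_adds) is non-zero because bit 20 is set in the
   32-bit pattern 0xeb100000.  */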
8932 /* Thumb instruction encoders, in alphabetical order. */
8937 do_t_add_sub_w (void)
8941 Rd
= inst
.operands
[0].reg
;
8942 Rn
= inst
.operands
[1].reg
;
8944 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
8945 is the SP-{plus,minus}-immediate form of the instruction. */
8947 constraint (Rd
== REG_PC
, BAD_PC
);
8949 reject_bad_reg (Rd
);
8951 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
8952 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
8955 /* Parse an add or subtract instruction. We get here with inst.instruction
8956 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8963 Rd
= inst
.operands
[0].reg
;
8964 Rs
= (inst
.operands
[1].present
8965 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
8966 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
8969 set_it_insn_type_last ();
8977 flags
= (inst
.instruction
== T_MNEM_adds
8978 || inst
.instruction
== T_MNEM_subs
);
8980 narrow
= !in_it_block ();
8982 narrow
= in_it_block ();
8983 if (!inst
.operands
[2].isreg
)
8987 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
8989 add
= (inst
.instruction
== T_MNEM_add
8990 || inst
.instruction
== T_MNEM_adds
);
8992 if (inst
.size_req
!= 4)
8994 /* Attempt to use a narrow opcode, with relaxation if
8996 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
8997 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
8998 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
8999 opcode
= T_MNEM_add_sp
;
9000 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
9001 opcode
= T_MNEM_add_pc
;
9002 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
9005 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
9007 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
9011 inst
.instruction
= THUMB_OP16(opcode
);
9012 inst
.instruction
|= (Rd
<< 4) | Rs
;
9013 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9014 if (inst
.size_req
!= 2)
9015 inst
.relax
= opcode
;
9018 constraint (inst
.size_req
== 2, BAD_HIREG
);
9020 if (inst
.size_req
== 4
9021 || (inst
.size_req
!= 2 && !opcode
))
9025 constraint (add
, BAD_PC
);
9026 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
9027 _("only SUBS PC, LR, #const allowed"));
9028 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
9029 _("expression too complex"));
9030 constraint (inst
.reloc
.exp
.X_add_number
< 0
9031 || inst
.reloc
.exp
.X_add_number
> 0xff,
9032 _("immediate value out of range"));
9033 inst
.instruction
= T2_SUBS_PC_LR
9034 | inst
.reloc
.exp
.X_add_number
;
9035 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
9038 else if (Rs
== REG_PC
)
9040 /* Always use addw/subw. */
9041 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
9042 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMM12
;
9046 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9047 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
9050 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9052 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_IMM
;
9054 inst
.instruction
|= Rd
<< 8;
9055 inst
.instruction
|= Rs
<< 16;
9060 Rn
= inst
.operands
[2].reg
;
9061 /* See if we can do this with a 16-bit instruction. */
9062 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
9064 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9069 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
9070 || inst
.instruction
== T_MNEM_add
)
9073 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9077 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
9079 /* Thumb-1 cores (except v6-M) require at least one high
9080 register in a narrow non flag setting add. */
9081 if (Rd
> 7 || Rn
> 7
9082 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
9083 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
9090 inst
.instruction
= T_OPCODE_ADD_HI
;
9091 inst
.instruction
|= (Rd
& 8) << 4;
9092 inst
.instruction
|= (Rd
& 7);
9093 inst
.instruction
|= Rn
<< 3;
9099 constraint (Rd
== REG_PC
, BAD_PC
);
9100 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
9101 constraint (Rs
== REG_PC
, BAD_PC
);
9102 reject_bad_reg (Rn
);
9104 /* If we get here, it can't be done in 16 bits. */
9105 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
9106 _("shift must be constant"));
9107 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9108 inst
.instruction
|= Rd
<< 8;
9109 inst
.instruction
|= Rs
<< 16;
9110 encode_thumb32_shifted_operand (2);
9115 constraint (inst
.instruction
== T_MNEM_adds
9116 || inst
.instruction
== T_MNEM_subs
,
9119 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
9121 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
9122 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
9125 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9127 inst
.instruction
|= (Rd
<< 4) | Rs
;
9128 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9132 Rn
= inst
.operands
[2].reg
;
9133 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
9135 /* We now have Rd, Rs, and Rn set to registers. */
9136 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
9138 /* Can't do this for SUB. */
9139 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
9140 inst
.instruction
= T_OPCODE_ADD_HI
;
9141 inst
.instruction
|= (Rd
& 8) << 4;
9142 inst
.instruction
|= (Rd
& 7);
9144 inst
.instruction
|= Rn
<< 3;
9146 inst
.instruction
|= Rs
<< 3;
9148 constraint (1, _("dest must overlap one source register"));
9152 inst
.instruction
= (inst
.instruction
== T_MNEM_add
9153 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
9154 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
9164 Rd
= inst
.operands
[0].reg
;
9165 reject_bad_reg (Rd
);
9167 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
9169 /* Defer to section relaxation. */
9170 inst
.relax
= inst
.instruction
;
9171 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9172 inst
.instruction
|= Rd
<< 4;
9174 else if (unified_syntax
&& inst
.size_req
!= 2)
9176 /* Generate a 32-bit opcode. */
9177 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9178 inst
.instruction
|= Rd
<< 8;
9179 inst
.reloc
.type
= BFD_RELOC_ARM_T32_ADD_PC12
;
9180 inst
.reloc
.pc_rel
= 1;
9184 /* Generate a 16-bit opcode. */
9185 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9186 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_ADD
;
9187 inst
.reloc
.exp
.X_add_number
-= 4; /* PC relative adjust. */
9188 inst
.reloc
.pc_rel
= 1;
9190 inst
.instruction
|= Rd
<< 4;
9194 /* Arithmetic instructions for which there is just one 16-bit
9195 instruction encoding, and it allows only two low registers.
9196 For maximal compatibility with ARM syntax, we allow three register
9197 operands even when Thumb-32 instructions are not available, as long
9198 as the first two are identical. For instance, both "sbc r0,r1" and
9199 "sbc r0,r0,r1" are allowed. */
9205 Rd
= inst
.operands
[0].reg
;
9206 Rs
= (inst
.operands
[1].present
9207 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9208 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9209 Rn
= inst
.operands
[2].reg
;
9211 reject_bad_reg (Rd
);
9212 reject_bad_reg (Rs
);
9213 if (inst
.operands
[2].isreg
)
9214 reject_bad_reg (Rn
);
9218 if (!inst
.operands
[2].isreg
)
9220 /* For an immediate, we always generate a 32-bit opcode;
9221 section relaxation will shrink it later if possible. */
9222 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9223 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9224 inst
.instruction
|= Rd
<< 8;
9225 inst
.instruction
|= Rs
<< 16;
9226 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9232 /* See if we can do this with a 16-bit instruction. */
9233 if (THUMB_SETS_FLAGS (inst
.instruction
))
9234 narrow
= !in_it_block ();
9236 narrow
= in_it_block ();
9238 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9240 if (inst
.operands
[2].shifted
)
9242 if (inst
.size_req
== 4)
9248 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9249 inst
.instruction
|= Rd
;
9250 inst
.instruction
|= Rn
<< 3;
9254 /* If we get here, it can't be done in 16 bits. */
9255 constraint (inst
.operands
[2].shifted
9256 && inst
.operands
[2].immisreg
,
9257 _("shift must be constant"));
9258 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9259 inst
.instruction
|= Rd
<< 8;
9260 inst
.instruction
|= Rs
<< 16;
9261 encode_thumb32_shifted_operand (2);
9266 /* On its face this is a lie - the instruction does set the
9267 flags. However, the only supported mnemonic in this mode
9269 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9271 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9272 _("unshifted register required"));
9273 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9274 constraint (Rd
!= Rs
,
9275 _("dest and source1 must be the same register"));
9277 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9278 inst
.instruction
|= Rd
;
9279 inst
.instruction
|= Rn
<< 3;
9283 /* Similarly, but for instructions where the arithmetic operation is
9284 commutative, so we can allow either of them to be different from
9285 the destination operand in a 16-bit instruction. For instance, all
9286 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9293 Rd
= inst
.operands
[0].reg
;
9294 Rs
= (inst
.operands
[1].present
9295 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
9296 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
9297 Rn
= inst
.operands
[2].reg
;
9299 reject_bad_reg (Rd
);
9300 reject_bad_reg (Rs
);
9301 if (inst
.operands
[2].isreg
)
9302 reject_bad_reg (Rn
);
9306 if (!inst
.operands
[2].isreg
)
9308 /* For an immediate, we always generate a 32-bit opcode;
9309 section relaxation will shrink it later if possible. */
9310 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9311 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
9312 inst
.instruction
|= Rd
<< 8;
9313 inst
.instruction
|= Rs
<< 16;
9314 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
9320 /* See if we can do this with a 16-bit instruction. */
9321 if (THUMB_SETS_FLAGS (inst
.instruction
))
9322 narrow
= !in_it_block ();
9324 narrow
= in_it_block ();
9326 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
9328 if (inst
.operands
[2].shifted
)
9330 if (inst
.size_req
== 4)
9337 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9338 inst
.instruction
|= Rd
;
9339 inst
.instruction
|= Rn
<< 3;
9344 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9345 inst
.instruction
|= Rd
;
9346 inst
.instruction
|= Rs
<< 3;
9351 /* If we get here, it can't be done in 16 bits. */
9352 constraint (inst
.operands
[2].shifted
9353 && inst
.operands
[2].immisreg
,
9354 _("shift must be constant"));
9355 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9356 inst
.instruction
|= Rd
<< 8;
9357 inst
.instruction
|= Rs
<< 16;
9358 encode_thumb32_shifted_operand (2);
9363 /* On its face this is a lie - the instruction does set the
9364 flags. However, the only supported mnemonic in this mode
9366 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
9368 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
9369 _("unshifted register required"));
9370 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
9372 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9373 inst
.instruction
|= Rd
;
9376 inst
.instruction
|= Rn
<< 3;
9378 inst
.instruction
|= Rs
<< 3;
9380 constraint (1, _("dest must overlap one source register"));
9387 if (inst
.operands
[0].present
)
9389 constraint ((inst
.instruction
& 0xf0) != 0x40
9390 && inst
.operands
[0].imm
!= 0xf,
9391 _("bad barrier type"));
9392 inst
.instruction
|= inst
.operands
[0].imm
;
9395 inst
.instruction
|= 0xf;
9402 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
9403 constraint (msb
> 32, _("bit-field extends past end of register"));
9404 /* The instruction encoding stores the LSB and MSB,
9405 not the LSB and width. */
9406 Rd
= inst
.operands
[0].reg
;
9407 reject_bad_reg (Rd
);
9408 inst
.instruction
|= Rd
<< 8;
9409 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
9410 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
9411 inst
.instruction
|= msb
- 1;
9420 Rd
= inst
.operands
[0].reg
;
9421 reject_bad_reg (Rd
);
9423 /* #0 in second position is alternative syntax for bfc, which is
9424 the same instruction but with REG_PC in the Rm field. */
9425 if (!inst
.operands
[1].isreg
)
9429 Rn
= inst
.operands
[1].reg
;
9430 reject_bad_reg (Rn
);
9433 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
9434 constraint (msb
> 32, _("bit-field extends past end of register"));
9435 /* The instruction encoding stores the LSB and MSB,
9436 not the LSB and width. */
9437 inst
.instruction
|= Rd
<< 8;
9438 inst
.instruction
|= Rn
<< 16;
9439 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9440 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9441 inst
.instruction
|= msb
- 1;
9449 Rd
= inst
.operands
[0].reg
;
9450 Rn
= inst
.operands
[1].reg
;
9452 reject_bad_reg (Rd
);
9453 reject_bad_reg (Rn
);
9455 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
9456 _("bit-field extends past end of register"));
9457 inst
.instruction
|= Rd
<< 8;
9458 inst
.instruction
|= Rn
<< 16;
9459 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
9460 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
9461 inst
.instruction
|= inst
.operands
[3].imm
- 1;
9464 /* ARM V5 Thumb BLX (argument parse)
9465 BLX <target_addr> which is BLX(1)
9466 BLX <Rm> which is BLX(2)
9467 Unfortunately, there are two different opcodes for this mnemonic.
9468 So, the insns[].value is not used, and the code here zaps values
9469 into inst.instruction.
9471 ??? How to take advantage of the additional two bits of displacement
9472 available in Thumb32 mode? Need new relocation? */
9477 set_it_insn_type_last ();
9479 if (inst
.operands
[0].isreg
)
9481 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9482 /* We have a register, so this is BLX(2). */
9483 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9487 /* No register. This must be BLX(1). */
9488 inst
.instruction
= 0xf000e800;
9489 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BLX
;
9490 inst
.reloc
.pc_rel
= 1;
9501 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
9505 /* Conditional branches inside IT blocks are encoded as unconditional
9512 if (cond
!= COND_ALWAYS
)
9513 opcode
= T_MNEM_bcond
;
9515 opcode
= inst
.instruction
;
9517 if (unified_syntax
&& inst
.size_req
== 4)
9519 inst
.instruction
= THUMB_OP32(opcode
);
9520 if (cond
== COND_ALWAYS
)
9521 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
9524 gas_assert (cond
!= 0xF);
9525 inst
.instruction
|= cond
<< 22;
9526 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
9531 inst
.instruction
= THUMB_OP16(opcode
);
9532 if (cond
== COND_ALWAYS
)
9533 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
9536 inst
.instruction
|= cond
<< 8;
9537 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
9539 /* Allow section relaxation. */
9540 if (unified_syntax
&& inst
.size_req
!= 2)
9541 inst
.relax
= opcode
;
9544 inst
.reloc
.pc_rel
= 1;
9550 constraint (inst
.cond
!= COND_ALWAYS
,
9551 _("instruction is always unconditional"));
9552 if (inst
.operands
[0].present
)
9554 constraint (inst
.operands
[0].imm
> 255,
9555 _("immediate value out of range"));
9556 inst
.instruction
|= inst
.operands
[0].imm
;
9557 set_it_insn_type (NEUTRAL_IT_INSN
);
9562 do_t_branch23 (void)
9564 set_it_insn_type_last ();
9565 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
9566 inst
.reloc
.pc_rel
= 1;
9568 #if defined(OBJ_COFF)
9569 /* If the destination of the branch is a defined symbol which does not have
9570 the THUMB_FUNC attribute, then we must be calling a function which has
9571 the (interfacearm) attribute. We look for the Thumb entry point to that
9572 function and change the branch to refer to that function instead. */
9573 if ( inst
.reloc
.exp
.X_op
== O_symbol
9574 && inst
.reloc
.exp
.X_add_symbol
!= NULL
9575 && S_IS_DEFINED (inst
.reloc
.exp
.X_add_symbol
)
9576 && ! THUMB_IS_FUNC (inst
.reloc
.exp
.X_add_symbol
))
9577 inst
.reloc
.exp
.X_add_symbol
=
9578 find_real_start (inst
.reloc
.exp
.X_add_symbol
);
9585 set_it_insn_type_last ();
9586 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
9587 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
9588 should cause the alignment to be checked once it is known. This is
9589 because BX PC only works if the instruction is word aligned. */
9597 set_it_insn_type_last ();
9598 Rm
= inst
.operands
[0].reg
;
9599 reject_bad_reg (Rm
);
9600 inst
.instruction
|= Rm
<< 16;
9609 Rd
= inst
.operands
[0].reg
;
9610 Rm
= inst
.operands
[1].reg
;
9612 reject_bad_reg (Rd
);
9613 reject_bad_reg (Rm
);
9615 inst
.instruction
|= Rd
<< 8;
9616 inst
.instruction
|= Rm
<< 16;
9617 inst
.instruction
|= Rm
;
9623 set_it_insn_type (OUTSIDE_IT_INSN
);
9624 inst
.instruction
|= inst
.operands
[0].imm
;
9630 set_it_insn_type (OUTSIDE_IT_INSN
);
9632 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
9633 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
9635 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
9636 inst
.instruction
= 0xf3af8000;
9637 inst
.instruction
|= imod
<< 9;
9638 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
9639 if (inst
.operands
[1].present
)
9640 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
9644 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
9645 && (inst
.operands
[0].imm
& 4),
9646 _("selected processor does not support 'A' form "
9647 "of this instruction"));
9648 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
9649 _("Thumb does not support the 2-argument "
9650 "form of this instruction"));
9651 inst
.instruction
|= inst
.operands
[0].imm
;
9655 /* THUMB CPY instruction (argument parse). */
9660 if (inst
.size_req
== 4)
9662 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
9663 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9664 inst
.instruction
|= inst
.operands
[1].reg
;
9668 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
9669 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
9670 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
9677 set_it_insn_type (OUTSIDE_IT_INSN
);
9678 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
9679 inst
.instruction
|= inst
.operands
[0].reg
;
9680 inst
.reloc
.pc_rel
= 1;
9681 inst
.reloc
.type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
9687 inst
.instruction
|= inst
.operands
[0].imm
;
9693 unsigned Rd
, Rn
, Rm
;
9695 Rd
= inst
.operands
[0].reg
;
9696 Rn
= (inst
.operands
[1].present
9697 ? inst
.operands
[1].reg
: Rd
);
9698 Rm
= inst
.operands
[2].reg
;
9700 reject_bad_reg (Rd
);
9701 reject_bad_reg (Rn
);
9702 reject_bad_reg (Rm
);
9704 inst
.instruction
|= Rd
<< 8;
9705 inst
.instruction
|= Rn
<< 16;
9706 inst
.instruction
|= Rm
;
9712 if (unified_syntax
&& inst
.size_req
== 4)
9713 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9715 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
  unsigned int cond = inst.operands[0].imm;

  set_it_insn_type (IT_INSN);
  now_it.mask = (inst.instruction & 0xf) | 0x10;
  now_it.cc = cond;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      if ((mask & 0x7) == 0)
	/* no conversion needed */;
      else if ((mask & 0x3) == 0)
	mask ^= 0x8;
      else if ((mask & 0x1) == 0)
	mask ^= 0xC;
      else
	mask ^= 0xE;

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
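/* Illustrative example (not in the original source, and assuming the insns
   table entry for "itt" carries mask bits 0xc): for "itt eq" the condition
   EQ is even, (0xc & 0x3) == 0 holds, so the code above flips bit 3 and the
   encoded mask becomes 0x4, i.e. the "then" bit is rewritten to match
   cond[0] = 0.  */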
/* Helper function used for both push/pop and ldm/stm.  */
static void
encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
{
  bfd_boolean load;

  load = (inst.instruction & (1 << 20)) != 0;

  if (mask & (1 << 13))
    inst.error = _("SP not allowed in register list");
  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    set_it_insn_type_last ();
	}

      if ((mask & (1 << base)) != 0
	  && writeback)
	as_warn (_("base register should not be in register list "
		   "when written back"));
    }
  else
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");

      if (mask & (1 << base))
	as_warn (_("value stored for r%d is UNPREDICTABLE"), base);
    }

  if ((mask & (mask - 1)) == 0)
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  inst.instruction |= base << 16;
}
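/* Illustrative note (not in the original source): the single-register path
   above means that, for example, "ldmia r3!, {r5}" is emitted as the
   equivalent "ldr r5, [r3], #4" (the "ia! -> [base], #4" case) instead of a
   one-register LDM.  */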
9817 /* This really doesn't seem worth it. */
9818 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
9819 _("expression too complex"));
9820 constraint (inst
.operands
[1].writeback
,
9821 _("Thumb load/store multiple does not support {reglist}^"));
9829 /* See if we can use a 16-bit instruction. */
9830 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
9831 && inst
.size_req
!= 4
9832 && !(inst
.operands
[1].imm
& ~0xff))
9834 mask
= 1 << inst
.operands
[0].reg
;
9836 if (inst
.operands
[0].reg
<= 7
9837 && (inst
.instruction
== T_MNEM_stmia
9838 ? inst
.operands
[0].writeback
9839 : (inst
.operands
[0].writeback
9840 == !(inst
.operands
[1].imm
& mask
))))
9842 if (inst
.instruction
== T_MNEM_stmia
9843 && (inst
.operands
[1].imm
& mask
)
9844 && (inst
.operands
[1].imm
& (mask
- 1)))
9845 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9846 inst
.operands
[0].reg
);
9848 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9849 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9850 inst
.instruction
|= inst
.operands
[1].imm
;
9853 else if (inst
.operands
[0] .reg
== REG_SP
9854 && inst
.operands
[0].writeback
)
9856 inst
.instruction
= THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
9857 ? T_MNEM_push
: T_MNEM_pop
);
9858 inst
.instruction
|= inst
.operands
[1].imm
;
9865 if (inst
.instruction
< 0xffff)
9866 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
9868 encode_thumb2_ldmstm (inst
.operands
[0].reg
, inst
.operands
[1].imm
,
9869 inst
.operands
[0].writeback
);
9874 constraint (inst
.operands
[0].reg
> 7
9875 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
9876 constraint (inst
.instruction
!= T_MNEM_ldmia
9877 && inst
.instruction
!= T_MNEM_stmia
,
9878 _("Thumb-2 instruction only valid in unified syntax"));
9879 if (inst
.instruction
== T_MNEM_stmia
)
9881 if (!inst
.operands
[0].writeback
)
9882 as_warn (_("this instruction will write back the base register"));
9883 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
9884 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
9885 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9886 inst
.operands
[0].reg
);
9890 if (!inst
.operands
[0].writeback
9891 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9892 as_warn (_("this instruction will write back the base register"));
9893 else if (inst
.operands
[0].writeback
9894 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
9895 as_warn (_("this instruction will not write back the base register"));
9898 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
9899 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9900 inst
.instruction
|= inst
.operands
[1].imm
;
9907 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9908 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9909 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9910 || inst
.operands
[1].negative
,
9913 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9915 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9916 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9917 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
9923 if (!inst
.operands
[1].present
)
9925 constraint (inst
.operands
[0].reg
== REG_LR
,
9926 _("r14 not allowed as first register "
9927 "when second register is omitted"));
9928 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9930 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
9933 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9934 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
9935 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9941 unsigned long opcode
;
9944 if (inst
.operands
[0].isreg
9945 && !inst
.operands
[0].preind
9946 && inst
.operands
[0].reg
== REG_PC
)
9947 set_it_insn_type_last ();
9949 opcode
= inst
.instruction
;
9952 if (!inst
.operands
[1].isreg
)
9954 if (opcode
<= 0xffff)
9955 inst
.instruction
= THUMB_OP32 (opcode
);
9956 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
9959 if (inst
.operands
[1].isreg
9960 && !inst
.operands
[1].writeback
9961 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
9962 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
9964 && inst
.size_req
!= 4)
9966 /* Insn may have a 16-bit form. */
9967 Rn
= inst
.operands
[1].reg
;
9968 if (inst
.operands
[1].immisreg
)
9970 inst
.instruction
= THUMB_OP16 (opcode
);
9972 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
9974 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
9975 reject_bad_reg (inst
.operands
[1].imm
);
9977 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
9978 && opcode
!= T_MNEM_ldrsb
)
9979 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
9980 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
9987 if (inst
.reloc
.pc_rel
)
9988 opcode
= T_MNEM_ldr_pc2
;
9990 opcode
= T_MNEM_ldr_pc
;
9994 if (opcode
== T_MNEM_ldr
)
9995 opcode
= T_MNEM_ldr_sp
;
9997 opcode
= T_MNEM_str_sp
;
9999 inst
.instruction
= inst
.operands
[0].reg
<< 8;
10003 inst
.instruction
= inst
.operands
[0].reg
;
10004 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10006 inst
.instruction
|= THUMB_OP16 (opcode
);
10007 if (inst
.size_req
== 2)
10008 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10010 inst
.relax
= opcode
;
10014 /* Definitely a 32-bit variant. */
10016 /* Do some validations regarding addressing modes. */
10017 if (inst
.operands
[1].immisreg
&& opcode
!= T_MNEM_ldr
10018 && opcode
!= T_MNEM_str
)
10019 reject_bad_reg (inst
.operands
[1].imm
);
10021 inst
.instruction
= THUMB_OP32 (opcode
);
10022 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10023 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
10027 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
10029 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
10031 /* Only [Rn,Rm] is acceptable. */
10032 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
10033 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
10034 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
10035 || inst
.operands
[1].negative
,
10036 _("Thumb does not support this addressing mode"));
10037 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10041 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10042 if (!inst
.operands
[1].isreg
)
10043 if (move_or_literal_pool (0, /*thumb_p=*/TRUE
, /*mode_3=*/FALSE
))
10046 constraint (!inst
.operands
[1].preind
10047 || inst
.operands
[1].shifted
10048 || inst
.operands
[1].writeback
,
10049 _("Thumb does not support this addressing mode"));
10050 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
10052 constraint (inst
.instruction
& 0x0600,
10053 _("byte or halfword not valid for base register"));
10054 constraint (inst
.operands
[1].reg
== REG_PC
10055 && !(inst
.instruction
& THUMB_LOAD_BIT
),
10056 _("r15 based store not allowed"));
10057 constraint (inst
.operands
[1].immisreg
,
10058 _("invalid base register for register offset"));
10060 if (inst
.operands
[1].reg
== REG_PC
)
10061 inst
.instruction
= T_OPCODE_LDR_PC
;
10062 else if (inst
.instruction
& THUMB_LOAD_BIT
)
10063 inst
.instruction
= T_OPCODE_LDR_SP
;
10065 inst
.instruction
= T_OPCODE_STR_SP
;
10067 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10068 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10072 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
10073 if (!inst
.operands
[1].immisreg
)
10075 /* Immediate offset. */
10076 inst
.instruction
|= inst
.operands
[0].reg
;
10077 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10078 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_OFFSET
;
10082 /* Register offset. */
10083 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
10084 constraint (inst
.operands
[1].negative
,
10085 _("Thumb does not support this addressing mode"));
10088 switch (inst
.instruction
)
10090 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
10091 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
10092 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
10093 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
10094 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
10095 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
10096 case 0x5600 /* ldrsb */:
10097 case 0x5e00 /* ldrsh */: break;
10101 inst
.instruction
|= inst
.operands
[0].reg
;
10102 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10103 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
10109 if (!inst
.operands
[1].present
)
10111 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
10112 constraint (inst
.operands
[0].reg
== REG_LR
,
10113 _("r14 not allowed here"));
10115 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10116 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
10117 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
10123 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10124 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
  unsigned Rd, Rn, Rm, Ra;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;
  Ra = inst.operands[3].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);
  reject_bad_reg (Ra);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= Ra << 12;
  unsigned RdLo, RdHi, Rn, Rm;

  RdLo = inst.operands[0].reg;
  RdHi = inst.operands[1].reg;
  Rn = inst.operands[2].reg;
  Rm = inst.operands[3].reg;

  reject_bad_reg (RdLo);
  reject_bad_reg (RdHi);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= RdLo << 12;
  inst.instruction |= RdHi << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
10170 do_t_mov_cmp (void)
10174 Rn
= inst
.operands
[0].reg
;
10175 Rm
= inst
.operands
[1].reg
;
10178 set_it_insn_type_last ();
10180 if (unified_syntax
)
10182 int r0off
= (inst
.instruction
== T_MNEM_mov
10183 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
10184 unsigned long opcode
;
10185 bfd_boolean narrow
;
10186 bfd_boolean low_regs
;
10188 low_regs
= (Rn
<= 7 && Rm
<= 7);
10189 opcode
= inst
.instruction
;
10190 if (in_it_block ())
10191 narrow
= opcode
!= T_MNEM_movs
;
10193 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
10194 if (inst
.size_req
== 4
10195 || inst
.operands
[1].shifted
)
10198 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10199 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
10200 && !inst
.operands
[1].shifted
10204 inst
.instruction
= T2_SUBS_PC_LR
;
10208 if (opcode
== T_MNEM_cmp
)
10210 constraint (Rn
== REG_PC
, BAD_PC
);
10213 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10215 warn_deprecated_sp (Rm
);
10216 /* R15 was documented as a valid choice for Rm in ARMv6,
10217 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10218 tools reject R15, so we do too. */
10219 constraint (Rm
== REG_PC
, BAD_PC
);
10222 reject_bad_reg (Rm
);
10224 else if (opcode
== T_MNEM_mov
10225 || opcode
== T_MNEM_movs
)
10227 if (inst
.operands
[1].isreg
)
10229 if (opcode
== T_MNEM_movs
)
10231 reject_bad_reg (Rn
);
10232 reject_bad_reg (Rm
);
10234 else if ((Rn
== REG_SP
|| Rn
== REG_PC
)
10235 && (Rm
== REG_SP
|| Rm
== REG_PC
))
10236 reject_bad_reg (Rm
);
10239 reject_bad_reg (Rn
);
10242 if (!inst
.operands
[1].isreg
)
10244 /* Immediate operand. */
10245 if (!in_it_block () && opcode
== T_MNEM_mov
)
10247 if (low_regs
&& narrow
)
10249 inst
.instruction
= THUMB_OP16 (opcode
);
10250 inst
.instruction
|= Rn
<< 8;
10251 if (inst
.size_req
== 2)
10252 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
10254 inst
.relax
= opcode
;
10258 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10259 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10260 inst
.instruction
|= Rn
<< r0off
;
10261 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10264 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
10265 && (inst
.instruction
== T_MNEM_mov
10266 || inst
.instruction
== T_MNEM_movs
))
10268 /* Register shifts are encoded as separate shift instructions. */
10269 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
10271 if (in_it_block ())
10276 if (inst
.size_req
== 4)
10279 if (!low_regs
|| inst
.operands
[1].imm
> 7)
10285 switch (inst
.operands
[1].shift_kind
)
10288 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
10291 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
10294 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
10297 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
10303 inst
.instruction
= opcode
;
10306 inst
.instruction
|= Rn
;
10307 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
10312 inst
.instruction
|= CONDS_BIT
;
10314 inst
.instruction
|= Rn
<< 8;
10315 inst
.instruction
|= Rm
<< 16;
10316 inst
.instruction
|= inst
.operands
[1].imm
;
10321 /* Some mov with immediate shift have narrow variants.
10322 Register shifts are handled above. */
10323 if (low_regs
&& inst
.operands
[1].shifted
10324 && (inst
.instruction
== T_MNEM_mov
10325 || inst
.instruction
== T_MNEM_movs
))
10327 if (in_it_block ())
10328 narrow
= (inst
.instruction
== T_MNEM_mov
);
10330 narrow
= (inst
.instruction
== T_MNEM_movs
);
10335 switch (inst
.operands
[1].shift_kind
)
10337 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
10338 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
10339 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
10340 default: narrow
= FALSE
; break;
10346 inst
.instruction
|= Rn
;
10347 inst
.instruction
|= Rm
<< 3;
10348 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
10352 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10353 inst
.instruction
|= Rn
<< r0off
;
10354 encode_thumb32_shifted_operand (1);
10358 switch (inst
.instruction
)
10361 inst
.instruction
= T_OPCODE_MOV_HR
;
10362 inst
.instruction
|= (Rn
& 0x8) << 4;
10363 inst
.instruction
|= (Rn
& 0x7);
10364 inst
.instruction
|= Rm
<< 3;
10368 /* We know we have low registers at this point.
10369 Generate ADD Rd, Rs, #0. */
10370 inst
.instruction
= T_OPCODE_ADD_I3
;
10371 inst
.instruction
|= Rn
;
10372 inst
.instruction
|= Rm
<< 3;
10378 inst
.instruction
= T_OPCODE_CMP_LR
;
10379 inst
.instruction
|= Rn
;
10380 inst
.instruction
|= Rm
<< 3;
10384 inst
.instruction
= T_OPCODE_CMP_HR
;
10385 inst
.instruction
|= (Rn
& 0x8) << 4;
10386 inst
.instruction
|= (Rn
& 0x7);
10387 inst
.instruction
|= Rm
<< 3;
10394 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10396 /* PR 10443: Do not silently ignore shifted operands. */
10397 constraint (inst
.operands
[1].shifted
,
10398 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
10400 if (inst
.operands
[1].isreg
)
10402 if (Rn
< 8 && Rm
< 8)
10404 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
10405 since a MOV instruction produces unpredictable results. */
10406 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10407 inst
.instruction
= T_OPCODE_ADD_I3
;
10409 inst
.instruction
= T_OPCODE_CMP_LR
;
10411 inst
.instruction
|= Rn
;
10412 inst
.instruction
|= Rm
<< 3;
10416 if (inst
.instruction
== T_OPCODE_MOV_I8
)
10417 inst
.instruction
= T_OPCODE_MOV_HR
;
10419 inst
.instruction
= T_OPCODE_CMP_HR
;
10425 constraint (Rn
> 7,
10426 _("only lo regs allowed with immediate"));
10427 inst
.instruction
|= Rn
<< 8;
10428 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_IMM
;
  top = (inst.instruction & 0x00800000) != 0;
  if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
    {
      constraint (top, _(":lower16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
    }
  else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
    {
      constraint (!top, _(":upper16: not allowed this instruction"));
      inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
    }

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  inst.instruction |= Rd << 8;
  if (inst.reloc.type == BFD_RELOC_UNUSED)
    {
      imm = inst.reloc.exp.X_add_number;
      inst.instruction |= (imm & 0xf000) << 4;
      inst.instruction |= (imm & 0x0800) << 15;
      inst.instruction |= (imm & 0x0700) << 4;
      inst.instruction |= (imm & 0x00ff);
    }
}
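/* The shifts above scatter the 16-bit constant into the T32 movw/movt
   immediate fields imm4:i:imm3:imm8.  Illustrative example: imm = 0xABCD is
   placed as
     bits 15:12 (0xA)  -> instruction bits 19:16   ((imm & 0xf000) << 4)
     bit  11    (1)    -> instruction bit  26      ((imm & 0x0800) << 15)
     bits 10:8  (3)    -> instruction bits 14:12   ((imm & 0x0700) << 4)
     bits  7:0  (0xCD) -> instruction bits  7:0    ((imm & 0x00ff))  */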
10466 do_t_mvn_tst (void)
10470 Rn
= inst
.operands
[0].reg
;
10471 Rm
= inst
.operands
[1].reg
;
10473 if (inst
.instruction
== T_MNEM_cmp
10474 || inst
.instruction
== T_MNEM_cmn
)
10475 constraint (Rn
== REG_PC
, BAD_PC
);
10477 reject_bad_reg (Rn
);
10478 reject_bad_reg (Rm
);
10480 if (unified_syntax
)
10482 int r0off
= (inst
.instruction
== T_MNEM_mvn
10483 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
10484 bfd_boolean narrow
;
10486 if (inst
.size_req
== 4
10487 || inst
.instruction
> 0xffff
10488 || inst
.operands
[1].shifted
10489 || Rn
> 7 || Rm
> 7)
10491 else if (inst
.instruction
== T_MNEM_cmn
)
10493 else if (THUMB_SETS_FLAGS (inst
.instruction
))
10494 narrow
= !in_it_block ();
10496 narrow
= in_it_block ();
10498 if (!inst
.operands
[1].isreg
)
10500 /* For an immediate, we always generate a 32-bit opcode;
10501 section relaxation will shrink it later if possible. */
10502 if (inst
.instruction
< 0xffff)
10503 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10504 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10505 inst
.instruction
|= Rn
<< r0off
;
10506 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10510 /* See if we can do this with a 16-bit instruction. */
10513 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10514 inst
.instruction
|= Rn
;
10515 inst
.instruction
|= Rm
<< 3;
10519 constraint (inst
.operands
[1].shifted
10520 && inst
.operands
[1].immisreg
,
10521 _("shift must be constant"));
10522 if (inst
.instruction
< 0xffff)
10523 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10524 inst
.instruction
|= Rn
<< r0off
;
10525 encode_thumb32_shifted_operand (1);
10531 constraint (inst
.instruction
> 0xffff
10532 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
10533 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
10534 _("unshifted register required"));
10535 constraint (Rn
> 7 || Rm
> 7,
10538 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10539 inst
.instruction
|= Rn
;
10540 inst
.instruction
|= Rm
<< 3;
10550 if (do_vfp_nsyn_mrs () == SUCCESS
)
10553 flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
10556 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
),
10557 _("selected processor does not support "
10558 "requested special purpose register"));
10562 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
10563 _("selected processor does not support "
10564 "requested special purpose register"));
10565 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
10566 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
10567 _("'CPSR' or 'SPSR' expected"));
10570 Rd
= inst
.operands
[0].reg
;
10571 reject_bad_reg (Rd
);
10573 inst
.instruction
|= Rd
<< 8;
10574 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
10575 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
10584 if (do_vfp_nsyn_msr () == SUCCESS
)
10587 constraint (!inst
.operands
[1].isreg
,
10588 _("Thumb encoding does not support an immediate here"));
10589 flags
= inst
.operands
[0].imm
;
10592 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
),
10593 _("selected processor does not support "
10594 "requested special purpose register"));
10598 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
),
10599 _("selected processor does not support "
10600 "requested special purpose register"));
10604 Rn
= inst
.operands
[1].reg
;
10605 reject_bad_reg (Rn
);
10607 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
10608 inst
.instruction
|= (flags
& ~SPSR_BIT
) >> 8;
10609 inst
.instruction
|= (flags
& 0xff);
10610 inst
.instruction
|= Rn
<< 16;
10616 bfd_boolean narrow
;
10617 unsigned Rd
, Rn
, Rm
;
10619 if (!inst
.operands
[2].present
)
10620 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
10622 Rd
= inst
.operands
[0].reg
;
10623 Rn
= inst
.operands
[1].reg
;
10624 Rm
= inst
.operands
[2].reg
;
10626 if (unified_syntax
)
10628 if (inst
.size_req
== 4
10634 else if (inst
.instruction
== T_MNEM_muls
)
10635 narrow
= !in_it_block ();
10637 narrow
= in_it_block ();
10641 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
10642 constraint (Rn
> 7 || Rm
> 7,
10649 /* 16-bit MULS/Conditional MUL. */
10650 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10651 inst
.instruction
|= Rd
;
10654 inst
.instruction
|= Rm
<< 3;
10656 inst
.instruction
|= Rn
<< 3;
10658 constraint (1, _("dest must overlap one source register"));
10662 constraint (inst
.instruction
!= T_MNEM_mul
,
10663 _("Thumb-2 MUL must not set flags"));
10665 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10666 inst
.instruction
|= Rd
<< 8;
10667 inst
.instruction
|= Rn
<< 16;
10668 inst
.instruction
|= Rm
<< 0;
10670 reject_bad_reg (Rd
);
10671 reject_bad_reg (Rn
);
10672 reject_bad_reg (Rm
);
10679 unsigned RdLo
, RdHi
, Rn
, Rm
;
10681 RdLo
= inst
.operands
[0].reg
;
10682 RdHi
= inst
.operands
[1].reg
;
10683 Rn
= inst
.operands
[2].reg
;
10684 Rm
= inst
.operands
[3].reg
;
10686 reject_bad_reg (RdLo
);
10687 reject_bad_reg (RdHi
);
10688 reject_bad_reg (Rn
);
10689 reject_bad_reg (Rm
);
10691 inst
.instruction
|= RdLo
<< 12;
10692 inst
.instruction
|= RdHi
<< 8;
10693 inst
.instruction
|= Rn
<< 16;
10694 inst
.instruction
|= Rm
;
10697 as_tsktsk (_("rdhi and rdlo must be different"));
10703 set_it_insn_type (NEUTRAL_IT_INSN
);
10705 if (unified_syntax
)
10707 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
10709 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10710 inst
.instruction
|= inst
.operands
[0].imm
;
10714 /* PR9722: Check for Thumb2 availability before
10715 generating a thumb2 nop instruction. */
10716 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
10718 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10719 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
10722 inst
.instruction
= 0x46c0;
10727 constraint (inst
.operands
[0].present
,
10728 _("Thumb does not support NOP with hints"));
10729 inst
.instruction
= 0x46c0;
10736 if (unified_syntax
)
10738 bfd_boolean narrow
;
10740 if (THUMB_SETS_FLAGS (inst
.instruction
))
10741 narrow
= !in_it_block ();
10743 narrow
= in_it_block ();
10744 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
10746 if (inst
.size_req
== 4)
10751 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10752 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
10753 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10757 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10758 inst
.instruction
|= inst
.operands
[0].reg
;
10759 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10764 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
10766 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
10768 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10769 inst
.instruction
|= inst
.operands
[0].reg
;
10770 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
10779 Rd
= inst
.operands
[0].reg
;
10780 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
10782 reject_bad_reg (Rd
);
10783 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
10784 reject_bad_reg (Rn
);
10786 inst
.instruction
|= Rd
<< 8;
10787 inst
.instruction
|= Rn
<< 16;
10789 if (!inst
.operands
[2].isreg
)
10791 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
10792 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10798 Rm
= inst
.operands
[2].reg
;
10799 reject_bad_reg (Rm
);
10801 constraint (inst
.operands
[2].shifted
10802 && inst
.operands
[2].immisreg
,
10803 _("shift must be constant"));
10804 encode_thumb32_shifted_operand (2);
10811 unsigned Rd
, Rn
, Rm
;
10813 Rd
= inst
.operands
[0].reg
;
10814 Rn
= inst
.operands
[1].reg
;
10815 Rm
= inst
.operands
[2].reg
;
10817 reject_bad_reg (Rd
);
10818 reject_bad_reg (Rn
);
10819 reject_bad_reg (Rm
);
10821 inst
.instruction
|= Rd
<< 8;
10822 inst
.instruction
|= Rn
<< 16;
10823 inst
.instruction
|= Rm
;
10824 if (inst
.operands
[3].present
)
10826 unsigned int val
= inst
.reloc
.exp
.X_add_number
;
10827 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
10828 _("expression too complex"));
10829 inst
.instruction
|= (val
& 0x1c) << 10;
10830 inst
.instruction
|= (val
& 0x03) << 6;
10837 if (!inst
.operands
[3].present
)
10841 inst
.instruction
&= ~0x00000020;
10843 /* PR 10168. Swap the Rm and Rn registers. */
10844 Rtmp
= inst
.operands
[1].reg
;
10845 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
10846 inst
.operands
[2].reg
= Rtmp
;
10854 if (inst
.operands
[0].immisreg
)
10855 reject_bad_reg (inst
.operands
[0].imm
);
10857 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
10861 do_t_push_pop (void)
10865 constraint (inst
.operands
[0].writeback
,
10866 _("push/pop do not support {reglist}^"));
10867 constraint (inst
.reloc
.type
!= BFD_RELOC_UNUSED
,
10868 _("expression too complex"));
10870 mask
= inst
.operands
[0].imm
;
10871 if ((mask
& ~0xff) == 0)
10872 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
10873 else if ((inst
.instruction
== T_MNEM_push
10874 && (mask
& ~0xff) == 1 << REG_LR
)
10875 || (inst
.instruction
== T_MNEM_pop
10876 && (mask
& ~0xff) == 1 << REG_PC
))
10878 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10879 inst
.instruction
|= THUMB_PP_PC_LR
;
10880 inst
.instruction
|= mask
& 0xff;
10882 else if (unified_syntax
)
10884 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10885 encode_thumb2_ldmstm (13, mask
, TRUE
);
10889 inst
.error
= _("invalid register list to push/pop instruction");
  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rm << 16;
  inst.instruction |= Rm;
10915 Rd
= inst
.operands
[0].reg
;
10916 Rm
= inst
.operands
[1].reg
;
10918 reject_bad_reg (Rd
);
10919 reject_bad_reg (Rm
);
10921 if (Rd
<= 7 && Rm
<= 7
10922 && inst
.size_req
!= 4)
10924 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
10925 inst
.instruction
|= Rd
;
10926 inst
.instruction
|= Rm
<< 3;
10928 else if (unified_syntax
)
10930 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10931 inst
.instruction
|= Rd
<< 8;
10932 inst
.instruction
|= Rm
<< 16;
10933 inst
.instruction
|= Rm
;
10936 inst
.error
= BAD_HIREG
;
10944 Rd
= inst
.operands
[0].reg
;
10945 Rm
= inst
.operands
[1].reg
;
10947 reject_bad_reg (Rd
);
10948 reject_bad_reg (Rm
);
10950 inst
.instruction
|= Rd
<< 8;
10951 inst
.instruction
|= Rm
;
10959 Rd
= inst
.operands
[0].reg
;
10960 Rs
= (inst
.operands
[1].present
10961 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10962 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10964 reject_bad_reg (Rd
);
10965 reject_bad_reg (Rs
);
10966 if (inst
.operands
[2].isreg
)
10967 reject_bad_reg (inst
.operands
[2].reg
);
10969 inst
.instruction
|= Rd
<< 8;
10970 inst
.instruction
|= Rs
<< 16;
10971 if (!inst
.operands
[2].isreg
)
10973 bfd_boolean narrow
;
10975 if ((inst
.instruction
& 0x00100000) != 0)
10976 narrow
= !in_it_block ();
10978 narrow
= in_it_block ();
10980 if (Rd
> 7 || Rs
> 7)
10983 if (inst
.size_req
== 4 || !unified_syntax
)
10986 if (inst
.reloc
.exp
.X_op
!= O_constant
10987 || inst
.reloc
.exp
.X_add_number
!= 0)
10990 /* Turn rsb #0 into 16-bit neg. We should probably do this via
10991 relaxation, but it doesn't seem worth the hassle. */
10994 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
10995 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
10996 inst
.instruction
|= Rs
<< 3;
10997 inst
.instruction
|= Rd
;
11001 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11002 inst
.reloc
.type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11006 encode_thumb32_shifted_operand (2);
11012 set_it_insn_type (OUTSIDE_IT_INSN
);
11013 if (inst
.operands
[0].imm
)
11014 inst
.instruction
|= 0x8;
11020 if (!inst
.operands
[1].present
)
11021 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
11023 if (unified_syntax
)
11025 bfd_boolean narrow
;
11028 switch (inst
.instruction
)
11031 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
11033 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
11035 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
11037 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
11041 if (THUMB_SETS_FLAGS (inst
.instruction
))
11042 narrow
= !in_it_block ();
11044 narrow
= in_it_block ();
11045 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
11047 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
11049 if (inst
.operands
[2].isreg
11050 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
11051 || inst
.operands
[2].reg
> 7))
11053 if (inst
.size_req
== 4)
11056 reject_bad_reg (inst
.operands
[0].reg
);
11057 reject_bad_reg (inst
.operands
[1].reg
);
11061 if (inst
.operands
[2].isreg
)
11063 reject_bad_reg (inst
.operands
[2].reg
);
11064 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11065 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11066 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11067 inst
.instruction
|= inst
.operands
[2].reg
;
11071 inst
.operands
[1].shifted
= 1;
11072 inst
.operands
[1].shift_kind
= shift_kind
;
11073 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
11074 ? T_MNEM_movs
: T_MNEM_mov
);
11075 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11076 encode_thumb32_shifted_operand (1);
11077 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
11078 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11083 if (inst
.operands
[2].isreg
)
11085 switch (shift_kind
)
11087 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11088 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11089 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11090 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11094 inst
.instruction
|= inst
.operands
[0].reg
;
11095 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11099 switch (shift_kind
)
11101 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11102 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11103 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11106 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11107 inst
.instruction
|= inst
.operands
[0].reg
;
11108 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11114 constraint (inst
.operands
[0].reg
> 7
11115 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
11116 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11118 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
11120 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
11121 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
11122 _("source1 and dest must be same register"));
11124 switch (inst
.instruction
)
11126 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
11127 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
11128 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
11129 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
11133 inst
.instruction
|= inst
.operands
[0].reg
;
11134 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
11138 switch (inst
.instruction
)
11140 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
11141 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
11142 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
11143 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
11146 inst
.reloc
.type
= BFD_RELOC_ARM_THUMB_SHIFT
;
11147 inst
.instruction
|= inst
.operands
[0].reg
;
11148 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  unsigned Rd, Rn, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
11192 unsigned int value
= inst
.reloc
.exp
.X_add_number
;
11193 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11194 _("expression too complex"));
11195 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11196 inst
.instruction
|= (value
& 0xf000) >> 12;
11197 inst
.instruction
|= (value
& 0x0ff0);
11198 inst
.instruction
|= (value
& 0x000f) << 16;
11202 do_t_ssat_usat (int bias
)
11206 Rd
= inst
.operands
[0].reg
;
11207 Rn
= inst
.operands
[2].reg
;
11209 reject_bad_reg (Rd
);
11210 reject_bad_reg (Rn
);
11212 inst
.instruction
|= Rd
<< 8;
11213 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
11214 inst
.instruction
|= Rn
<< 16;
11216 if (inst
.operands
[3].present
)
11218 offsetT shift_amount
= inst
.reloc
.exp
.X_add_number
;
11220 inst
.reloc
.type
= BFD_RELOC_UNUSED
;
11222 constraint (inst
.reloc
.exp
.X_op
!= O_constant
,
11223 _("expression too complex"));
11225 if (shift_amount
!= 0)
11227 constraint (shift_amount
> 31,
11228 _("shift expression is too large"));
11230 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
11231 inst
.instruction
|= 0x00200000; /* sh bit. */
11233 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
11234 inst
.instruction
|= (shift_amount
& 0x03) << 6;
11242 do_t_ssat_usat (1);
11250 Rd
= inst
.operands
[0].reg
;
11251 Rn
= inst
.operands
[2].reg
;
11253 reject_bad_reg (Rd
);
11254 reject_bad_reg (Rn
);
11256 inst
.instruction
|= Rd
<< 8;
11257 inst
.instruction
|= inst
.operands
[1].imm
- 1;
11258 inst
.instruction
|= Rn
<< 16;
11264 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
11265 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
11266 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
11267 || inst
.operands
[2].negative
,
11270 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
11272 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11273 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11274 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11275 inst
.reloc
.type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11281 if (!inst
.operands
[2].present
)
11282 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
11284 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
11285 || inst
.operands
[0].reg
== inst
.operands
[2].reg
11286 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
11289 inst
.instruction
|= inst
.operands
[0].reg
;
11290 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
11291 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
11292 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
11298 unsigned Rd
, Rn
, Rm
;
11300 Rd
= inst
.operands
[0].reg
;
11301 Rn
= inst
.operands
[1].reg
;
11302 Rm
= inst
.operands
[2].reg
;
11304 reject_bad_reg (Rd
);
11305 reject_bad_reg (Rn
);
11306 reject_bad_reg (Rm
);
11308 inst
.instruction
|= Rd
<< 8;
11309 inst
.instruction
|= Rn
<< 16;
11310 inst
.instruction
|= Rm
;
11311 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
11319 Rd
= inst
.operands
[0].reg
;
11320 Rm
= inst
.operands
[1].reg
;
11322 reject_bad_reg (Rd
);
11323 reject_bad_reg (Rm
);
11325 if (inst
.instruction
<= 0xffff
11326 && inst
.size_req
!= 4
11327 && Rd
<= 7 && Rm
<= 7
11328 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
11330 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11331 inst
.instruction
|= Rd
;
11332 inst
.instruction
|= Rm
<< 3;
11334 else if (unified_syntax
)
11336 if (inst
.instruction
<= 0xffff)
11337 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11338 inst
.instruction
|= Rd
<< 8;
11339 inst
.instruction
|= Rm
;
11340 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
11344 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
11345 _("Thumb encoding does not support rotation"));
11346 constraint (1, BAD_HIREG
);
11353 inst
.reloc
.type
= BFD_RELOC_ARM_SWI
;
  half = (inst.instruction & 0x10) != 0;
  set_it_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
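/* For example, this accepts
       tbb  [r0, r1]
       tbh  [r0, r1, lsl #1]
   where only the halfword form (TBH) may use the LSL #1 index; that is what
   the "shifted index" constraint above enforces.  */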
11381 do_t_ssat_usat (0);
  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm;
  inst.instruction |= Rn << 16;
11400 /* Neon instruction encoder helpers. */
/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
11414 /* Map overloaded Neon opcodes to their respective encodings. */
11415 #define NEON_ENC_TAB \
11416 X(vabd, 0x0000700, 0x1200d00, N_INV), \
11417 X(vmax, 0x0000600, 0x0000f00, N_INV), \
11418 X(vmin, 0x0000610, 0x0200f00, N_INV), \
11419 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
11420 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
11421 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
11422 X(vadd, 0x0000800, 0x0000d00, N_INV), \
11423 X(vsub, 0x1000800, 0x0200d00, N_INV), \
11424 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
11425 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
11426 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
11427 /* Register variants of the following two instructions are encoded as
11428 vcge / vcgt with the operands reversed. */ \
11429 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
11430 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
11431 X(vfma, N_INV, 0x0000c10, N_INV), \
11432 X(vfms, N_INV, 0x0200c10, N_INV), \
11433 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
11434 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
11435 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
11436 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
11437 X(vmlal, 0x0800800, N_INV, 0x0800240), \
11438 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
11439 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
11440 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
11441 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
11442 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
11443 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
11444 X(vshl, 0x0000400, N_INV, 0x0800510), \
11445 X(vqshl, 0x0000410, N_INV, 0x0800710), \
11446 X(vand, 0x0000110, N_INV, 0x0800030), \
11447 X(vbic, 0x0100110, N_INV, 0x0800030), \
11448 X(veor, 0x1000110, N_INV, N_INV), \
11449 X(vorn, 0x0300110, N_INV, 0x0800010), \
11450 X(vorr, 0x0200110, N_INV, 0x0800010), \
11451 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
11452 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
11453 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
11454 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
11455 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
11456 X(vst1, 0x0000000, 0x0800000, N_INV), \
11457 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
11458 X(vst2, 0x0000100, 0x0800100, N_INV), \
11459 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
11460 X(vst3, 0x0000200, 0x0800200, N_INV), \
11461 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
11462 X(vst4, 0x0000300, 0x0800300, N_INV), \
11463 X(vmovn, 0x1b20200, N_INV, N_INV), \
11464 X(vtrn, 0x1b20080, N_INV, N_INV), \
11465 X(vqmovn, 0x1b20200, N_INV, N_INV), \
11466 X(vqmovun, 0x1b20240, N_INV, N_INV), \
11467 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
11468 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
11469 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
11470 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
11471 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
11472 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
11473 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
11474 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
11475 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
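/* Sketch of how the X() trick works (illustrative expansion): with the first
   definition of X above, the NEON_ENC_TAB row
       X(vabd, 0x0000700, 0x1200d00, N_INV)
   contributes the enumerator N_MNEM_vabd; with the second definition it
   contributes the table entry { 0x0000700, 0x1200d00, N_INV }.  Both are
   generated from the same list in the same order, so the mnemonic value can
   be used (masked) as an index into neon_enc_tab.  */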
11491 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
11492 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11493 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11494 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11495 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11496 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11497 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11498 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
11499 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
11500 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
11501 #define NEON_ENC_SINGLE_(X) \
11502 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
11503 #define NEON_ENC_DOUBLE_(X) \
11504 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

#define check_neon_suffixes						\
  do								\
    {								\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }								\
  while (0)
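/* Typical use (as in the VFP helpers further down): an encoder does
       NEON_ENCODE (SINGLE, inst);
   which rewrites inst.instruction via NEON_ENC_SINGLE_ () and marks the
   instruction as Neon-encoded by setting inst.is_neon, so that
   check_neon_suffixes does not complain about a stray type suffix.  */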
/* Define shapes for instruction operands.  The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */
11542 #define NEON_SHAPE_DEF \
11543 X(3, (D, D, D), DOUBLE), \
11544 X(3, (Q, Q, Q), QUAD), \
11545 X(3, (D, D, I), DOUBLE), \
11546 X(3, (Q, Q, I), QUAD), \
11547 X(3, (D, D, S), DOUBLE), \
11548 X(3, (Q, Q, S), QUAD), \
11549 X(2, (D, D), DOUBLE), \
11550 X(2, (Q, Q), QUAD), \
11551 X(2, (D, S), DOUBLE), \
11552 X(2, (Q, S), QUAD), \
11553 X(2, (D, R), DOUBLE), \
11554 X(2, (Q, R), QUAD), \
11555 X(2, (D, I), DOUBLE), \
11556 X(2, (Q, I), QUAD), \
11557 X(3, (D, L, D), DOUBLE), \
11558 X(2, (D, Q), MIXED), \
11559 X(2, (Q, D), MIXED), \
11560 X(3, (D, Q, I), MIXED), \
11561 X(3, (Q, D, I), MIXED), \
11562 X(3, (Q, D, D), MIXED), \
11563 X(3, (D, Q, Q), MIXED), \
11564 X(3, (Q, Q, D), MIXED), \
11565 X(3, (Q, D, S), MIXED), \
11566 X(3, (D, Q, S), MIXED), \
11567 X(4, (D, D, D, I), DOUBLE), \
11568 X(4, (Q, Q, Q, I), QUAD), \
11569 X(2, (F, F), SINGLE), \
11570 X(3, (F, F, F), SINGLE), \
11571 X(2, (F, I), SINGLE), \
11572 X(2, (F, D), MIXED), \
11573 X(2, (D, F), MIXED), \
11574 X(3, (F, F, I), MIXED), \
11575 X(4, (R, R, F, F), SINGLE), \
11576 X(4, (F, F, R, R), SINGLE), \
11577 X(3, (D, R, R), DOUBLE), \
11578 X(3, (R, R, D), DOUBLE), \
11579 X(2, (S, R), SINGLE), \
11580 X(2, (R, S), SINGLE), \
11581 X(2, (F, R), SINGLE), \
11582 X(2, (R, F), SINGLE)
11584 #define S2(A,B) NS_##A##B
11585 #define S3(A,B,C) NS_##A##B##C
11586 #define S4(A,B,C,D) NS_##A##B##C##D
11588 #define X(N, L, C) S##N L
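/* Illustrative expansion: with the S2/S3/S4 helpers and the successive
   redefinitions of X() before each table instantiation, the row
       X(3, (D, D, I), DOUBLE)
   in NEON_SHAPE_DEF yields
     - the enumeration constant NS_DDI,
     - the class SC_DOUBLE in neon_shape_class[], and
     - the entry { 3, { SE_D, SE_D, SE_I } } in neon_shape_tab[],
   one slot per shape, all generated from the single list above.  */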
11601 enum neon_shape_class
11609 #define X(N, L, C) SC_##C
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};
#undef X

/* Register widths of above.  */
static unsigned neon_shape_el_size[] =
struct neon_shape_info
{
  int els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }

#define X(N, L, C) { N, S##N L }

static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};
/* Bit masks used in type checking given instructions.
   'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit.  If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_F64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
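/* For example, a constraint of
       N_EQK | N_DBL
   on an operand means "same base type as the key operand, but twice its
   size", which is how a widening operation can describe its wider operand;
   likewise N_EQK | N_UNS forces the unsigned variant of the key type.  The
   actual adjustment is performed by neon_modify_type_size below.  */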
/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)

/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives.  Return NS_NULL if the current instruction
   doesn't fit.  For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.  */
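/* Typical call, as used by the VFP helpers later in this file:
       rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
   tries the all-single-precision shape first, then the all-double one, and
   returns NS_NULL (after reporting "invalid instruction shape") if the
   parsed operands match neither.  */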
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  enum neon_shape first_shape = shape;
  va_list ap;

  /* Fix missing optional operands.  FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1.  This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
11745 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
11747 if (!inst
.operands
[j
].present
)
11753 switch (neon_shape_tab
[shape
].el
[j
])
11756 if (!(inst
.operands
[j
].isreg
11757 && inst
.operands
[j
].isvec
11758 && inst
.operands
[j
].issingle
11759 && !inst
.operands
[j
].isquad
))
11764 if (!(inst
.operands
[j
].isreg
11765 && inst
.operands
[j
].isvec
11766 && !inst
.operands
[j
].isquad
11767 && !inst
.operands
[j
].issingle
))
11772 if (!(inst
.operands
[j
].isreg
11773 && !inst
.operands
[j
].isvec
))
11778 if (!(inst
.operands
[j
].isreg
11779 && inst
.operands
[j
].isvec
11780 && inst
.operands
[j
].isquad
11781 && !inst
.operands
[j
].issingle
))
11786 if (!(!inst
.operands
[j
].isreg
11787 && !inst
.operands
[j
].isscalar
))
11792 if (!(!inst
.operands
[j
].isreg
11793 && inst
.operands
[j
].isscalar
))
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}

/* True if SHAPE is predominantly a quadword operation (most of the time, this
   means the Q bit should be set).  */

static int
neon_quad (enum neon_shape shape)
{
  return neon_shape_class[shape] == SC_QUAD;
}
static void
neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
		       unsigned *g_size)
{
  /* Allow modification to be made to types which are constrained to be
     based on the key element, based on bits set alongside N_EQK.  */
  if ((typebits & N_EQK) != 0)
    {
      if ((typebits & N_HLF) != 0)
	*g_size /= 2;
      else if ((typebits & N_DBL) != 0)
	*g_size *= 2;
      if ((typebits & N_SGN) != 0)
	*g_type = NT_signed;
      else if ((typebits & N_UNS) != 0)
	*g_type = NT_unsigned;
      else if ((typebits & N_INT) != 0)
	*g_type = NT_integer;
      else if ((typebits & N_FLT) != 0)
	*g_type = NT_float;
      else if ((typebits & N_SIZ) != 0)
	*g_type = NT_untyped;
    }
}
/* Return operand OPNO promoted by bits set in THISARG.  KEY should be the "key"
   operand type, i.e. the single type specified in a Neon instruction when it
   is the only one given.  */

static struct neon_type_el
neon_type_promote (struct neon_type_el *key, unsigned thisarg)
{
  struct neon_type_el dest = *key;

  gas_assert ((thisarg & N_EQK) != 0);

  neon_modify_type_size (thisarg, &dest.type, &dest.size);

  return dest;
}
11865 /* Convert Neon type and size into compact bitmask representation. */
11867 static enum neon_type_mask
11868 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
11875 case 8: return N_8
;
11876 case 16: return N_16
;
11877 case 32: return N_32
;
11878 case 64: return N_64
;
11886 case 8: return N_I8
;
11887 case 16: return N_I16
;
11888 case 32: return N_I32
;
11889 case 64: return N_I64
;
11897 case 16: return N_F16
;
11898 case 32: return N_F32
;
11899 case 64: return N_F64
;
11907 case 8: return N_P8
;
11908 case 16: return N_P16
;
11916 case 8: return N_S8
;
11917 case 16: return N_S16
;
11918 case 32: return N_S32
;
11919 case 64: return N_S64
;
11927 case 8: return N_U8
;
11928 case 16: return N_U16
;
11929 case 32: return N_U32
;
11930 case 64: return N_U64
;
11941 /* Convert compact Neon bitmask type representation to a type and size. Only
11942 handles the case where a single bit is set in the mask. */
11945 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
11946 enum neon_type_mask mask
)
11948 if ((mask
& N_EQK
) != 0)
11951 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
11953 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_P16
)) != 0)
11955 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
11957 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
)) != 0)
11962 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
11964 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
11965 *type
= NT_unsigned
;
11966 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
11967 *type
= NT_integer
;
11968 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
11969 *type
= NT_untyped
;
11970 else if ((mask
& (N_P8
| N_P16
)) != 0)
11972 else if ((mask
& (N_F32
| N_F64
)) != 0)
/* Modify a bitmask of allowed types.  This is only needed for type
   relaxation.  */

static unsigned
modify_types_allowed (unsigned allowed, unsigned mods)
{
  unsigned size;
  enum neon_el_type type;
  unsigned destmask;
  int i;

  destmask = 0;

  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
    {
      if (el_type_of_type_chk (&type, &size,
			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
	{
	  neon_modify_type_size (mods, &type, &size);
	  destmask |= type_chk_of_el_type (type, size);
	}
    }

  return destmask;
}
/* Check type and return type classification.
   The manual states (paraphrase): If one datatype is given, it indicates the
   type for:
    - the second operand, if there is one
    - the operand, if there is no second operand
    - the result, if there are no operands.
   This isn't quite good enough though, so we use a concept of a "key" datatype
   which is set on a per-instruction basis, which is the one which matters when
   only one data type is written.
   Note: this function has side-effects (e.g. filling in missing operands).  All
   Neon instructions should call it before performing bit encoding.  */
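/* A representative call (taken from try_vfp_nsyn further down):
       et = neon_check_type (2, rs,
			     N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
   Operand 1 is the key (it may be .f32 or .f64); operand 0 must then have
   the same type and size, and both must match the VFP register width of the
   selected shape.  */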
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;
12029 /* Optional registers in Neon instructions are always (not) in operand 1.
12030 Fill in the missing operand here, if it was omitted. */
12031 if (els
> 1 && !inst
.operands
[1].present
)
12032 inst
.operands
[1] = inst
.operands
[0];
12034 /* Suck up all the varargs. */
12036 for (i
= 0; i
< els
; i
++)
12038 unsigned thisarg
= va_arg (ap
, unsigned);
12039 if (thisarg
== N_IGNORE_TYPE
)
12044 types
[i
] = thisarg
;
12045 if ((thisarg
& N_KEY
) != 0)
12050 if (inst
.vectype
.elems
> 0)
12051 for (i
= 0; i
< els
; i
++)
12052 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
12054 first_error (_("types specified in both the mnemonic and operands"));
12058 /* Duplicate inst.vectype elements here as necessary.
12059 FIXME: No idea if this is exactly the same as the ARM assembler,
12060 particularly when an insn takes one register and one non-register
12062 if (inst
.vectype
.elems
== 1 && els
> 1)
12065 inst
.vectype
.elems
= els
;
12066 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
12067 for (j
= 0; j
< els
; j
++)
12069 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
12072 else if (inst
.vectype
.elems
== 0 && els
> 0)
12075 /* No types were given after the mnemonic, so look for types specified
12076 after each operand. We allow some flexibility here; as long as the
12077 "key" operand has a type, we can infer the others. */
12078 for (j
= 0; j
< els
; j
++)
12079 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
12080 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
12082 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
12084 for (j
= 0; j
< els
; j
++)
12085 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
12086 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
12091 first_error (_("operand types can't be inferred"));
12095 else if (inst
.vectype
.elems
!= els
)
12097 first_error (_("type specifier has the wrong number of parts"));
12101 for (pass
= 0; pass
< 2; pass
++)
12103 for (i
= 0; i
< els
; i
++)
12105 unsigned thisarg
= types
[i
];
12106 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
12107 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
12108 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
12109 unsigned g_size
= inst
.vectype
.el
[i
].size
;
12111 /* Decay more-specific signed & unsigned types to sign-insensitive
12112 integer types if sign-specific variants are unavailable. */
12113 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
12114 && (types_allowed
& N_SU_ALL
) == 0)
12115 g_type
= NT_integer
;
12117 /* If only untyped args are allowed, decay any more specific types to
12118 them. Some instructions only care about signs for some element
12119 sizes, so handle that properly. */
12120 if ((g_size
== 8 && (types_allowed
& N_8
) != 0)
12121 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
12122 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
12123 || (g_size
== 64 && (types_allowed
& N_64
) != 0))
12124 g_type
= NT_untyped
;
12128 if ((thisarg
& N_KEY
) != 0)
12132 key_allowed
= thisarg
& ~N_KEY
;
12137 if ((thisarg
& N_VFP
) != 0)
12139 enum neon_shape_el regshape
;
12140 unsigned regwidth
, match
;
12142 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
12145 first_error (_("invalid instruction shape"));
12148 regshape
= neon_shape_tab
[ns
].el
[i
];
12149 regwidth
= neon_shape_el_size
[regshape
];
12151 /* In VFP mode, operands must match register widths. If we
12152 have a key operand, use its width, else use the width of
12153 the current operand. */
12159 if (regwidth
!= match
)
12161 first_error (_("operand size must match register width"));
12166 if ((thisarg
& N_EQK
) == 0)
12168 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
12170 if ((given_type
& types_allowed
) == 0)
12172 first_error (_("bad type in Neon instruction"));
12178 enum neon_el_type mod_k_type
= k_type
;
12179 unsigned mod_k_size
= k_size
;
12180 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
12181 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
12183 first_error (_("inconsistent types in Neon instruction"));
12191 return inst
.vectype
.el
[key_el
];
/* Neon-style VFP instruction forwarding.  */

/* Thumb VFP instructions have 0xE in the condition field.  */

static void
do_vfp_cond_or_thumb (void)
{
  inst.is_neon = 1;

  if (thumb_mode)
    inst.instruction |= 0xe0000000;
  else
    inst.instruction |= inst.cond << 28;
}
12209 /* Look up and encode a simple mnemonic, for use as a helper function for the
12210 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12211 etc. It is assumed that operand parsing has already been done, and that the
12212 operands are in the form expected by the given opcode (this isn't necessarily
12213 the same as the form in which they were parsed, hence some massaging must
12214 take place before this function is called).
12215 Checks current arch version against that in the looked-up opcode. */
12218 do_vfp_nsyn_opcode (const char *opname
)
12220 const struct asm_opcode
*opcode
;
12222 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
12227 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
12228 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
12235 inst
.instruction
= opcode
->tvalue
;
12236 opcode
->tencode ();
12240 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
12241 opcode
->aencode ();
12246 do_vfp_nsyn_add_sub (enum neon_shape rs
)
12248 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
12253 do_vfp_nsyn_opcode ("fadds");
12255 do_vfp_nsyn_opcode ("fsubs");
12260 do_vfp_nsyn_opcode ("faddd");
12262 do_vfp_nsyn_opcode ("fsubd");
12266 /* Check operand types to see if this is a VFP instruction, and if so call
12270 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12272   enum neon_shape rs;
12273   struct neon_type_el et;
12278       rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12279       et = neon_check_type (2, rs,
12280            N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12284       rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12285       et = neon_check_type (3, rs,
12286            N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12293   if (et.type != NT_invtype)
12304 do_vfp_nsyn_mla_mls (enum neon_shape rs)
12306   int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
12311       do_vfp_nsyn_opcode ("fmacs");
12313       do_vfp_nsyn_opcode ("fnmacs");
12318       do_vfp_nsyn_opcode ("fmacd");
12320       do_vfp_nsyn_opcode ("fnmacd");
12325 do_vfp_nsyn_fma_fms (enum neon_shape rs)
12327   int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
12332       do_vfp_nsyn_opcode ("ffmas");
12334       do_vfp_nsyn_opcode ("ffnmas");
12339       do_vfp_nsyn_opcode ("ffmad");
12341       do_vfp_nsyn_opcode ("ffnmad");
12346 do_vfp_nsyn_mul (enum neon_shape rs)
12349     do_vfp_nsyn_opcode ("fmuls");
12351     do_vfp_nsyn_opcode ("fmuld");
12355 do_vfp_nsyn_abs_neg (enum neon_shape rs)
12357   int is_neg = (inst.instruction & 0x80) != 0;
12358   neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
12363       do_vfp_nsyn_opcode ("fnegs");
12365       do_vfp_nsyn_opcode ("fabss");
12370       do_vfp_nsyn_opcode ("fnegd");
12372       do_vfp_nsyn_opcode ("fabsd");
12376 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
12377 insns belong to Neon, and are handled elsewhere. */
12380 do_vfp_nsyn_ldm_stm (int is_dbmode)
12382   int is_ldm = (inst.instruction & (1 << 20)) != 0;
12386       do_vfp_nsyn_opcode ("fldmdbs");
12388       do_vfp_nsyn_opcode ("fldmias");
12393       do_vfp_nsyn_opcode ("fstmdbs");
12395       do_vfp_nsyn_opcode ("fstmias");
12400 do_vfp_nsyn_sqrt (void)
12402   enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12403   neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12406     do_vfp_nsyn_opcode ("fsqrts");
12408     do_vfp_nsyn_opcode ("fsqrtd");
12412 do_vfp_nsyn_div (void)
12414   enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12415   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12416        N_F32 | N_F64 | N_KEY | N_VFP);
12419     do_vfp_nsyn_opcode ("fdivs");
12421     do_vfp_nsyn_opcode ("fdivd");
12425 do_vfp_nsyn_nmul (void)
12427   enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12428   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
12429        N_F32 | N_F64 | N_KEY | N_VFP);
12433       NEON_ENCODE (SINGLE, inst);
12434       do_vfp_sp_dyadic ();
12438       NEON_ENCODE (DOUBLE, inst);
12439       do_vfp_dp_rd_rn_rm ();
12441   do_vfp_cond_or_thumb ();
12445 do_vfp_nsyn_cmp (void)
12447   if (inst.operands[1].isreg)
12449       enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12450       neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
12454           NEON_ENCODE (SINGLE, inst);
12455           do_vfp_sp_monadic ();
12459           NEON_ENCODE (DOUBLE, inst);
12460           do_vfp_dp_rd_rm ();
12465       enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
12466       neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
12468       switch (inst.instruction & 0x0fffffff)
12471           inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
12474           inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
12482           NEON_ENCODE (SINGLE, inst);
12483           do_vfp_sp_compare_z ();
12487           NEON_ENCODE (DOUBLE, inst);
12491   do_vfp_cond_or_thumb ();
12495 nsyn_insert_sp (void)
12497   inst.operands[1] = inst.operands[0];
12498   memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
12499   inst.operands[0].reg = REG_SP;
12500   inst.operands[0].isreg = 1;
12501   inst.operands[0].writeback = 1;
12502   inst.operands[0].present = 1;
12506 do_vfp_nsyn_push (void)
12509   if (inst.operands[1].issingle)
12510     do_vfp_nsyn_opcode ("fstmdbs");
12512     do_vfp_nsyn_opcode ("fstmdbd");
12516 do_vfp_nsyn_pop (void)
12519   if (inst.operands[1].issingle)
12520     do_vfp_nsyn_opcode ("fldmias");
12522     do_vfp_nsyn_opcode ("fldmiad");
12525 /* Fix up Neon data-processing instructions, ORing in the correct bits for
12526 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
12529 neon_dp_fixup (struct arm_it* insn)
12531   unsigned int i = insn->instruction;
12536       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
12547   insn->instruction = i;
12550 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
12554 neon_logbits (unsigned x)
12556   return ffs (x) - 4;
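/* For the element sizes used by Neon this gives neon_logbits (8) == 0,
   neon_logbits (16) == 1, neon_logbits (32) == 2 and neon_logbits (64) == 3,
   i.e. the value written into the two-bit size fields by the encoders below.  */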
12559 #define LOW4(R) ((R) & 0xf)
12560 #define HI1(R) (((R) >> 4) & 1)
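/* Example: D register 17 splits as LOW4 (17) == 0x1 for the four-bit register
   field and HI1 (17) == 1 for the corresponding D/N/M extension bit.  */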
12562 /* Encode insns with bit pattern:
12564 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12565 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
12567 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
12568 different meaning for some instruction. */
12571 neon_three_same (int isquad, int ubit, int size)
12573   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12574   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12575   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12576   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12577   inst.instruction |= LOW4 (inst.operands[2].reg);
12578   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12579   inst.instruction |= (isquad != 0) << 6;
12580   inst.instruction |= (ubit != 0) << 24;
12582     inst.instruction |= neon_logbits (size) << 20;
12584   neon_dp_fixup (&inst);
12587 /* Encode instructions of the form:
12589 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
12590 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
12592 Don't write size if SIZE == -1. */
12595 neon_two_same (int qbit, int ubit, int size)
12597   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12598   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12599   inst.instruction |= LOW4 (inst.operands[1].reg);
12600   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12601   inst.instruction |= (qbit != 0) << 6;
12602   inst.instruction |= (ubit != 0) << 24;
12605     inst.instruction |= neon_logbits (size) << 18;
12607   neon_dp_fixup (&inst);
12610 /* Neon instruction encoders, in approximate order of appearance. */
12613 do_neon_dyadic_i_su (void)
12615   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12616   struct neon_type_el et = neon_check_type (3, rs,
12617     N_EQK, N_EQK, N_SU_32 | N_KEY);
12618   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12622 do_neon_dyadic_i64_su (void)
12624   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12625   struct neon_type_el et = neon_check_type (3, rs,
12626     N_EQK, N_EQK, N_SU_ALL | N_KEY);
12627   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12631 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
12634   unsigned size = et.size >> 3;
12635   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12636   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12637   inst.instruction |= LOW4 (inst.operands[1].reg);
12638   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12639   inst.instruction |= (isquad != 0) << 6;
12640   inst.instruction |= immbits << 16;
12641   inst.instruction |= (size >> 3) << 7;
12642   inst.instruction |= (size & 0x7) << 19;
12644     inst.instruction |= (uval != 0) << 24;
12646   neon_dp_fixup (&inst);
12650 do_neon_shl_imm (void)
12652   if (!inst.operands[2].isreg)
12654       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12655       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
12656       NEON_ENCODE (IMMED, inst);
12657       neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
12661       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12662       struct neon_type_el et = neon_check_type (3, rs,
12663         N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12666       /* VSHL/VQSHL 3-register variants have syntax such as:
12668          whereas other 3-register operations encoded by neon_three_same have
12671          (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
12673       tmp = inst.operands[2].reg;
12674       inst.operands[2].reg = inst.operands[1].reg;
12675       inst.operands[1].reg = tmp;
12676       NEON_ENCODE (INTEGER, inst);
12677       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12682 do_neon_qshl_imm (void)
12684   if (!inst.operands[2].isreg)
12686       enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12687       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
12689       NEON_ENCODE (IMMED, inst);
12690       neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
12691                       inst.operands[2].imm);
12695       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12696       struct neon_type_el et = neon_check_type (3, rs,
12697         N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
12700       /* See note in do_neon_shl_imm.  */
12701       tmp = inst.operands[2].reg;
12702       inst.operands[2].reg = inst.operands[1].reg;
12703       inst.operands[1].reg = tmp;
12704       NEON_ENCODE (INTEGER, inst);
12705       neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12710 do_neon_rshl (void)
12712   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12713   struct neon_type_el et = neon_check_type (3, rs,
12714     N_EQK, N_EQK, N_SU_ALL | N_KEY);
12717   tmp = inst.operands[2].reg;
12718   inst.operands[2].reg = inst.operands[1].reg;
12719   inst.operands[1].reg = tmp;
12720   neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
12724 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
12726   /* Handle .I8 pseudo-instructions.  */
12729       /* Unfortunately, this will make everything apart from zero out-of-range.
12730          FIXME is this the intended semantics? There doesn't seem much point in
12731          accepting .I8 if so.  */
12732       immediate |= immediate << 8;
12738       if (immediate == (immediate & 0x000000ff))
12740           *immbits = immediate;
12743       else if (immediate == (immediate & 0x0000ff00))
12745           *immbits = immediate >> 8;
12748       else if (immediate == (immediate & 0x00ff0000))
12750           *immbits = immediate >> 16;
12753       else if (immediate == (immediate & 0xff000000))
12755           *immbits = immediate >> 24;
12758       if ((immediate & 0xffff) != (immediate >> 16))
12759         goto bad_immediate;
12760       immediate &= 0xffff;
12763       if (immediate == (immediate & 0x000000ff))
12765           *immbits = immediate;
12768       else if (immediate == (immediate & 0x0000ff00))
12770           *immbits = immediate >> 8;
12775   first_error (_("immediate value out of range"));
12779 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
12783 neon_bits_same_in_bytes (unsigned imm)
12785   return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
12786          && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
12787          && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
12788          && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
12791 /* For immediate of above form, return 0bABCD.  */
12794 neon_squash_bits (unsigned imm)
12796   return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
12797          | ((imm & 0x01000000) >> 21);
12800 /* Compress quarter-float representation to 0b...000 abcdefgh. */
12803 neon_qfloat_bits (unsigned imm)
12805   return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
12808 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
12809 the instruction. *OP is passed as the initial value of the op field, and
12810 may be set to a different value depending on the constant (i.e.
12811 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
12812 MVN). If the immediate looks like a repeated pattern then also
12813 try smaller element sizes. */
12816 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
12817                          unsigned *immbits, int *op, int size,
12818                          enum neon_el_type type)
12820   /* Only permit float immediates (including 0.0/-0.0) if the operand type is
12822   if (type == NT_float && !float_p)
12825   if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
12827       if (size != 32 || *op == 1)
12829       *immbits = neon_qfloat_bits (immlo);
12835   if (neon_bits_same_in_bytes (immhi)
12836       && neon_bits_same_in_bytes (immlo))
12840       *immbits = (neon_squash_bits (immhi) << 4)
12841                  | neon_squash_bits (immlo);
12846   if (immhi != immlo)
12852       if (immlo == (immlo & 0x000000ff))
12857       else if (immlo == (immlo & 0x0000ff00))
12859           *immbits = immlo >> 8;
12862       else if (immlo == (immlo & 0x00ff0000))
12864           *immbits = immlo >> 16;
12867       else if (immlo == (immlo & 0xff000000))
12869           *immbits = immlo >> 24;
12872       else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
12874           *immbits = (immlo >> 8) & 0xff;
12877       else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
12879           *immbits = (immlo >> 16) & 0xff;
12883       if ((immlo & 0xffff) != (immlo >> 16))
12890       if (immlo == (immlo & 0x000000ff))
12895       else if (immlo == (immlo & 0x0000ff00))
12897           *immbits = immlo >> 8;
12901       if ((immlo & 0xff) != (immlo >> 8))
12906       if (immlo == (immlo & 0x000000ff))
12908 /* Don't allow MVN with 8-bit immediate. */
12918 /* Write immediate bits [7:0] to the following locations:
12920 |28/24|23 19|18 16|15 4|3 0|
12921 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
12923 This function is used by VMOV/VMVN/VORR/VBIC. */
12926 neon_write_immbits (unsigned immbits)
12928   inst.instruction |= immbits & 0xf;
12929   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
12930   inst.instruction |= ((immbits >> 7) & 0x1) << 24;
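/* For example, immbits 0xAB (0b10101011) sets the "a" bit (bit 24), writes
   0b010 into bits 18:16 and 0b1011 into bits 3:0.  */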
12933 /* Invert low-order SIZE bits of XHI:XLO. */
12936 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
12938   unsigned immlo = xlo ? *xlo : 0;
12939   unsigned immhi = xhi ? *xhi : 0;
12944       immlo = (~immlo) & 0xff;
12948       immlo = (~immlo) & 0xffff;
12952       immhi = (~immhi) & 0xffffffff;
12953       /* fall through.  */
12956       immlo = (~immlo) & 0xffffffff;
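/* Only the low-order SIZE bits are complemented, e.g. a low half of 0x1234
   with SIZE == 16 becomes 0xedcb; XHI is only complemented in the 64-bit
   case, which then falls through to invert XLO as well.  */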
12971 do_neon_logic (void)
12973   if (inst.operands[2].present && inst.operands[2].isreg)
12975       enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12976       neon_check_type (3, rs, N_IGNORE_TYPE);
12977       /* U bit and size field were set as part of the bitmask.  */
12978       NEON_ENCODE (INTEGER, inst);
12979       neon_three_same (neon_quad (rs), 0, -1);
12983       const int three_ops_form = (inst.operands[2].present
12984                                   && !inst.operands[2].isreg);
12985       const int immoperand = (three_ops_form ? 2 : 1);
12986       enum neon_shape rs = (three_ops_form
12987                             ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
12988                             : neon_select_shape (NS_DI, NS_QI, NS_NULL));
12989       struct neon_type_el et = neon_check_type (2, rs,
12990         N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12991       enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
12995       if (et.type == NT_invtype)
12998       if (three_ops_form)
12999         constraint (inst.operands[0].reg != inst.operands[1].reg,
13000                     _("first and second operands shall be the same register"));
13002       NEON_ENCODE (IMMED, inst);
13004       immbits = inst.operands[immoperand].imm;
13007           /* .i64 is a pseudo-op, so the immediate must be a repeating
13009           if (immbits != (inst.operands[immoperand].regisimm ?
13010                           inst.operands[immoperand].reg : 0))
13012               /* Set immbits to an invalid constant.  */
13013               immbits = 0xdeadbeef;
13020           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13024           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13028           /* Pseudo-instruction for VBIC.  */
13029           neon_invert_size (&immbits, 0, et.size);
13030           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13034           /* Pseudo-instruction for VORR.  */
13035           neon_invert_size (&immbits, 0, et.size);
13036           cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13046       inst.instruction |= neon_quad (rs) << 6;
13047       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13048       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13049       inst.instruction |= cmode << 8;
13050       neon_write_immbits (immbits);
13052       neon_dp_fixup (&inst);
13057 do_neon_bitfield (void)
13059 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13060 neon_check_type (3, rs
, N_IGNORE_TYPE
);
13061 neon_three_same (neon_quad (rs
), 0, -1);
13065 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
13068 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13069 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
13071 if (et
.type
== NT_float
)
13073 NEON_ENCODE (FLOAT
, inst
);
13074 neon_three_same (neon_quad (rs
), 0, -1);
13078 NEON_ENCODE (INTEGER
, inst
);
13079 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
13084 do_neon_dyadic_if_su (void)
13086 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
13090 do_neon_dyadic_if_su_d (void)
13092   /* This version only allows D registers, but that constraint is enforced during
13093      operand parsing so we don't need to do anything extra here.  */
13094   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13098 do_neon_dyadic_if_i_d (void)
13100 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13101 affected if we specify unsigned args. */
13102 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13105 enum vfp_or_neon_is_neon_bits
13108   NEON_CHECK_ARCH = 2
13111 /* Call this function if an instruction which may have belonged to the VFP or
13112 Neon instruction sets, but turned out to be a Neon instruction (due to the
13113 operand types involved, etc.). We have to check and/or fix-up a couple of
13116 - Make sure the user hasn't attempted to make a Neon instruction
13118 - Alter the value in the condition code field if necessary.
13119 - Make sure that the arch supports Neon instructions.
13121 Which of these operations take place depends on bits from enum
13122 vfp_or_neon_is_neon_bits.
13124 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13125 current instruction's condition is COND_ALWAYS, the condition field is
13126 changed to inst.uncond_value. This is necessary because instructions shared
13127 between VFP and Neon may be conditional for the VFP variants only, and the
13128 unconditional Neon version must have, e.g., 0xF in the condition field. */
13131 vfp_or_neon_is_neon (unsigned check)
13133   /* Conditions are always legal in Thumb mode (IT blocks).  */
13134   if (!thumb_mode && (check & NEON_CHECK_CC))
13136       if (inst.cond != COND_ALWAYS)
13138           first_error (_(BAD_COND));
13141       if (inst.uncond_value != -1)
13142         inst.instruction |= inst.uncond_value << 28;
13145   if ((check & NEON_CHECK_ARCH)
13146       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13148       first_error (_(BAD_FPU));
13156 do_neon_addsub_if_i (void)
13158   if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13161   if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13164   /* The "untyped" case can't happen. Do this to stop the "U" bit being
13165      affected if we specify unsigned args.  */
13166   neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13169 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13171 V<op> A,B (A is operand 0, B is operand 2)
13176 so handle that case specially. */
13179 neon_exchange_operands (void)
13181   void *scratch = alloca (sizeof (inst.operands[0]));
13182   if (inst.operands[1].present)
13184       /* Swap operands[1] and operands[2].  */
13185       memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13186       inst.operands[1] = inst.operands[2];
13187       memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13191       inst.operands[1] = inst.operands[2];
13192       inst.operands[2] = inst.operands[0];
13197 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
13199 if (inst
.operands
[2].isreg
)
13202 neon_exchange_operands ();
13203 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
13207 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13208 struct neon_type_el et
= neon_check_type (2, rs
,
13209 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
13211 NEON_ENCODE (IMMED
, inst
);
13212 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13213 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13214 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13215 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13216 inst
.instruction
|= neon_quad (rs
) << 6;
13217 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13218 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13220 neon_dp_fixup (&inst
);
13227 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, FALSE
);
13231 do_neon_cmp_inv (void)
13233 neon_compare (N_SUF_32
, N_S8
| N_S16
| N_S32
| N_F32
, TRUE
);
13239 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
13242 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13243 scalars, which are encoded in 5 bits, M : Rm.
13244 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13245 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13249 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
13251   unsigned regno = NEON_SCALAR_REG (scalar);
13252   unsigned elno = NEON_SCALAR_INDEX (scalar);
13257       if (regno > 7 || elno > 3)
13259       return regno | (elno << 3);
13262       if (regno > 15 || elno > 1)
13264       return regno | (elno << 4);
13268   first_error (_("scalar out of range for multiply instruction"));
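/* For example, the 16-bit scalar d3[2] is accepted (regno 3, elno 2) and
   encodes as 3 | (2 << 3) == 0x13, while d9[2] is rejected because only
   d0-d7 can hold 16-bit scalars for these instructions.  */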
13274 /* Encode multiply / multiply-accumulate scalar instructions. */
13277 neon_mul_mac (struct neon_type_el et, int ubit)
13281   /* Give a more helpful error message if we have an invalid type.  */
13282   if (et.type == NT_invtype)
13285   scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13286   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13287   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13288   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13289   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13290   inst.instruction |= LOW4 (scalar);
13291   inst.instruction |= HI1 (scalar) << 5;
13292   inst.instruction |= (et.type == NT_float) << 8;
13293   inst.instruction |= neon_logbits (et.size) << 20;
13294   inst.instruction |= (ubit != 0) << 24;
13296   neon_dp_fixup (&inst);
13300 do_neon_mac_maybe_scalar (void)
13302 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
13305 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13308 if (inst
.operands
[2].isscalar
)
13310 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13311 struct neon_type_el et
= neon_check_type (3, rs
,
13312 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F32
| N_KEY
);
13313 NEON_ENCODE (SCALAR
, inst
);
13314 neon_mul_mac (et
, neon_quad (rs
));
13318 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13319 affected if we specify unsigned args. */
13320 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13325 do_neon_fmac (void)
13327 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
13330 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13333 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
13339 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13340 struct neon_type_el et
= neon_check_type (3, rs
,
13341 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
13342 neon_three_same (neon_quad (rs
), 0, et
.size
);
13345 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
13346 same types as the MAC equivalents. The polynomial type for this instruction
13347 is encoded the same as the integer type. */
13352 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
13355 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13358 if (inst
.operands
[2].isscalar
)
13359 do_neon_mac_maybe_scalar ();
13361 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F32
| N_P8
, 0);
13365 do_neon_qdmulh (void)
13367 if (inst
.operands
[2].isscalar
)
13369 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
13370 struct neon_type_el et
= neon_check_type (3, rs
,
13371 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13372 NEON_ENCODE (SCALAR
, inst
);
13373 neon_mul_mac (et
, neon_quad (rs
));
13377 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13378 struct neon_type_el et
= neon_check_type (3, rs
,
13379 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
13380 NEON_ENCODE (INTEGER
, inst
);
13381 /* The U bit (rounding) comes from bit mask. */
13382 neon_three_same (neon_quad (rs
), 0, et
.size
);
13387 do_neon_fcmp_absolute (void)
13389 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13390 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13391 /* Size field comes from bit mask. */
13392 neon_three_same (neon_quad (rs
), 1, -1);
13396 do_neon_fcmp_absolute_inv (void)
13398 neon_exchange_operands ();
13399 do_neon_fcmp_absolute ();
13403 do_neon_step (void)
13405 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
13406 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_F32
| N_KEY
);
13407 neon_three_same (neon_quad (rs
), 0, -1);
13411 do_neon_abs_neg (void)
13413 enum neon_shape rs
;
13414 struct neon_type_el et
;
13416 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
13419 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13422 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13423 et
= neon_check_type (2, rs
, N_EQK
, N_S8
| N_S16
| N_S32
| N_F32
| N_KEY
);
13425 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13426 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13427 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13428 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13429 inst
.instruction
|= neon_quad (rs
) << 6;
13430 inst
.instruction
|= (et
.type
== NT_float
) << 10;
13431 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13433 neon_dp_fixup (&inst
);
13439 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13440 struct neon_type_el et
= neon_check_type (2, rs
,
13441 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13442 int imm
= inst
.operands
[2].imm
;
13443 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13444 _("immediate out of range for insert"));
13445 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13451 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13452 struct neon_type_el et
= neon_check_type (2, rs
,
13453 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
13454 int imm
= inst
.operands
[2].imm
;
13455 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13456 _("immediate out of range for insert"));
13457 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
13461 do_neon_qshlu_imm (void)
13463 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
13464 struct neon_type_el et
= neon_check_type (2, rs
,
13465 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
13466 int imm
= inst
.operands
[2].imm
;
13467 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
13468 _("immediate out of range for shift"));
13469 /* Only encodes the 'U present' variant of the instruction.
13470 In this case, signed types have OP (bit 8) set to 0.
13471 Unsigned types have OP set to 1. */
13472 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
13473 /* The rest of the bits are the same as other immediate shifts. */
13474 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
13478 do_neon_qmovn (void)
13480 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13481 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13482 /* Saturating move where operands can be signed or unsigned, and the
13483 destination has the same signedness. */
13484 NEON_ENCODE (INTEGER
, inst
);
13485 if (et
.type
== NT_unsigned
)
13486 inst
.instruction
|= 0xc0;
13488 inst
.instruction
|= 0x80;
13489 neon_two_same (0, 1, et
.size
/ 2);
13493 do_neon_qmovun (void)
13495 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13496 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13497 /* Saturating move with unsigned results. Operands must be signed. */
13498 NEON_ENCODE (INTEGER
, inst
);
13499 neon_two_same (0, 1, et
.size
/ 2);
13503 do_neon_rshift_sat_narrow (void)
13505 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13506 or unsigned. If operands are unsigned, results must also be unsigned. */
13507 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13508 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
13509 int imm
= inst
.operands
[2].imm
;
13510 /* This gets the bounds check, size encoding and immediate bits calculation
13514 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
13515 VQMOVN.I<size> <Dd>, <Qm>. */
13518 inst
.operands
[2].present
= 0;
13519 inst
.instruction
= N_MNEM_vqmovn
;
13524 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13525 _("immediate out of range"));
13526 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
13530 do_neon_rshift_sat_narrow_u (void)
13532 /* FIXME: Types for narrowing. If operands are signed, results can be signed
13533 or unsigned. If operands are unsigned, results must also be unsigned. */
13534 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13535 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
13536 int imm
= inst
.operands
[2].imm
;
13537 /* This gets the bounds check, size encoding and immediate bits calculation
13541 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
13542 VQMOVUN.I<size> <Dd>, <Qm>. */
13545 inst
.operands
[2].present
= 0;
13546 inst
.instruction
= N_MNEM_vqmovun
;
13551 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13552 _("immediate out of range"));
13553 /* FIXME: The manual is kind of unclear about what value U should have in
13554 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
13556 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
13560 do_neon_movn (void)
13562 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
13563 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13564 NEON_ENCODE (INTEGER
, inst
);
13565 neon_two_same (0, 1, et
.size
/ 2);
13569 do_neon_rshift_narrow (void)
13571 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
13572 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
13573 int imm
= inst
.operands
[2].imm
;
13574 /* This gets the bounds check, size encoding and immediate bits calculation
13578 /* If immediate is zero then we are a pseudo-instruction for
13579 VMOVN.I<size> <Dd>, <Qm> */
13582 inst
.operands
[2].present
= 0;
13583 inst
.instruction
= N_MNEM_vmovn
;
13588 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
13589 _("immediate out of range for narrowing operation"));
13590 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
13594 do_neon_shll (void)
13596 /* FIXME: Type checking when lengthening. */
13597 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
13598 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
13599 unsigned imm
= inst
.operands
[2].imm
;
13601 if (imm
== et
.size
)
13603 /* Maximum shift variant. */
13604 NEON_ENCODE (INTEGER
, inst
);
13605 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13606 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13607 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13608 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13609 inst
.instruction
|= neon_logbits (et
.size
) << 18;
13611 neon_dp_fixup (&inst
);
13615 /* A more-specific type check for non-max versions. */
13616 et
= neon_check_type (2, NS_QDI
,
13617 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
13618 NEON_ENCODE (IMMED
, inst
);
13619 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
13623 /* Check the various types for the VCVT instruction, and return which version
13624 the current instruction is. */
13627 neon_cvt_flavour (enum neon_shape rs
)
13629 #define CVT_VAR(C,X,Y) \
13630 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
13631 if (et.type != NT_invtype) \
13633 inst.error = NULL; \
13636 struct neon_type_el et
;
13637 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
13638 || rs
== NS_FF
) ? N_VFP
: 0;
13639 /* The instruction versions which take an immediate take one register
13640 argument, which is extended to the width of the full register. Thus the
13641 "source" and "destination" registers must have the same width. Hack that
13642 here by making the size equal to the key (wider, in this case) operand. */
13643 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
13645 CVT_VAR (0, N_S32
, N_F32
);
13646 CVT_VAR (1, N_U32
, N_F32
);
13647 CVT_VAR (2, N_F32
, N_S32
);
13648 CVT_VAR (3, N_F32
, N_U32
);
13649 /* Half-precision conversions. */
13650 CVT_VAR (4, N_F32
, N_F16
);
13651 CVT_VAR (5, N_F16
, N_F32
);
13655 /* VFP instructions. */
13656 CVT_VAR (6, N_F32
, N_F64
);
13657 CVT_VAR (7, N_F64
, N_F32
);
13658 CVT_VAR (8, N_S32
, N_F64
| key
);
13659 CVT_VAR (9, N_U32
, N_F64
| key
);
13660 CVT_VAR (10, N_F64
| key
, N_S32
);
13661 CVT_VAR (11, N_F64
| key
, N_U32
);
13662 /* VFP instructions with bitshift. */
13663 CVT_VAR (12, N_F32
| key
, N_S16
);
13664 CVT_VAR (13, N_F32
| key
, N_U16
);
13665 CVT_VAR (14, N_F64
| key
, N_S16
);
13666 CVT_VAR (15, N_F64
| key
, N_U16
);
13667 CVT_VAR (16, N_S16
, N_F32
| key
);
13668 CVT_VAR (17, N_U16
, N_F32
| key
);
13669 CVT_VAR (18, N_S16
, N_F64
| key
);
13670 CVT_VAR (19, N_U16
, N_F64
| key
);
13676 /* Neon-syntax VFP conversions. */
13679 do_vfp_nsyn_cvt (enum neon_shape rs
, int flavour
)
13681 const char *opname
= 0;
13683 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
)
13685 /* Conversions with immediate bitshift. */
13686 const char *enc
[] =
13710 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13712 opname
= enc
[flavour
];
13713 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13714 _("operands 0 and 1 must be the same register"));
13715 inst
.operands
[1] = inst
.operands
[2];
13716 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
13721 /* Conversions without bitshift. */
13722 const char *enc
[] =
13738 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
))
13739 opname
= enc
[flavour
];
13743 do_vfp_nsyn_opcode (opname
);
13747 do_vfp_nsyn_cvtz (void)
13749 enum neon_shape rs
= neon_select_shape (NS_FF
, NS_FD
, NS_NULL
);
13750 int flavour
= neon_cvt_flavour (rs
);
13751 const char *enc
[] =
13765 if (flavour
>= 0 && flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
13766 do_vfp_nsyn_opcode (enc
[flavour
]);
13770 do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED
)
13772 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
13773 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
, NS_NULL
);
13774 int flavour
= neon_cvt_flavour (rs
);
13776 /* PR11109: Handle round-to-zero for VCVT conversions. */
13778 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
13779 && (flavour
== 0 || flavour
== 1 || flavour
== 8 || flavour
== 9)
13780 && (rs
== NS_FD
|| rs
== NS_FF
))
13782 do_vfp_nsyn_cvtz ();
13786 /* VFP rather than Neon conversions. */
13789 do_vfp_nsyn_cvt (rs
, flavour
);
13799 unsigned enctab
[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
13801 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13804 /* Fixed-point conversion with #0 immediate is encoded as an
13805 integer conversion. */
13806 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
13808 immbits
= 32 - inst
.operands
[2].imm
;
13809 NEON_ENCODE (IMMED
, inst
);
13811 inst
.instruction
|= enctab
[flavour
];
13812 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13813 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13814 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13815 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13816 inst
.instruction
|= neon_quad (rs
) << 6;
13817 inst
.instruction
|= 1 << 21;
13818 inst
.instruction
|= immbits
<< 16;
13820 neon_dp_fixup (&inst
);
13828 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080 };
13830 NEON_ENCODE (INTEGER
, inst
);
13832 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
13836 inst
.instruction
|= enctab
[flavour
];
13838 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13839 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13840 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13841 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13842 inst
.instruction
|= neon_quad (rs
) << 6;
13843 inst
.instruction
|= 2 << 18;
13845 neon_dp_fixup (&inst
);
13849 /* Half-precision conversions for Advanced SIMD -- neon. */
13854 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
13856 as_bad (_("operand size must match register width"));
13861 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
13863 as_bad (_("operand size must match register width"));
13868 inst
.instruction
= 0x3b60600;
13870 inst
.instruction
= 0x3b60700;
13872 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13873 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13874 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13875 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13876 neon_dp_fixup (&inst
);
13880 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
13881 do_vfp_nsyn_cvt (rs
, flavour
);
13886 do_neon_cvtr (void)
13888 do_neon_cvt_1 (FALSE
);
13894 do_neon_cvt_1 (TRUE
);
13898 do_neon_cvtb (void)
13900 inst
.instruction
= 0xeb20a40;
13902 /* The sizes are attached to the mnemonic. */
13903 if (inst
.vectype
.el
[0].type
!= NT_invtype
13904 && inst
.vectype
.el
[0].size
== 16)
13905 inst
.instruction
|= 0x00010000;
13907 /* Programmer's syntax: the sizes are attached to the operands. */
13908 else if (inst
.operands
[0].vectype
.type
!= NT_invtype
13909 && inst
.operands
[0].vectype
.size
== 16)
13910 inst
.instruction
|= 0x00010000;
13912 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
13913 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
13914 do_vfp_cond_or_thumb ();
13919 do_neon_cvtt (void)
13922 inst
.instruction
|= 0x80;
13926 neon_move_immediate (void)
13928 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
13929 struct neon_type_el et
= neon_check_type (2, rs
,
13930 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
13931 unsigned immlo
, immhi
= 0, immbits
;
13932 int op
, cmode
, float_p
;
13934 constraint (et
.type
== NT_invtype
,
13935 _("operand size must be specified for immediate VMOV"));
13937 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
13938 op
= (inst
.instruction
& (1 << 5)) != 0;
13940 immlo
= inst
.operands
[1].imm
;
13941 if (inst
.operands
[1].regisimm
)
13942 immhi
= inst
.operands
[1].reg
;
13944 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
13945 _("immediate has bits set outside the operand size"));
13947 float_p
= inst
.operands
[1].immisfloat
;
13949 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
13950 et
.size
, et
.type
)) == FAIL
)
13952 /* Invert relevant bits only. */
13953 neon_invert_size (&immlo
, &immhi
, et
.size
);
13954 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
13955 with one or the other; those cases are caught by
13956 neon_cmode_for_move_imm. */
13958 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
13959 &op
, et
.size
, et
.type
)) == FAIL
)
13961 first_error (_("immediate out of range"));
13966 inst
.instruction
&= ~(1 << 5);
13967 inst
.instruction
|= op
<< 5;
13969 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13970 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13971 inst
.instruction
|= neon_quad (rs
) << 6;
13972 inst
.instruction
|= cmode
<< 8;
13974 neon_write_immbits (immbits
);
13980 if (inst
.operands
[1].isreg
)
13982 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
13984 NEON_ENCODE (INTEGER
, inst
);
13985 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
13986 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
13987 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
13988 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
13989 inst
.instruction
|= neon_quad (rs
) << 6;
13993 NEON_ENCODE (IMMED
, inst
);
13994 neon_move_immediate ();
13997 neon_dp_fixup (&inst
);
14000 /* Encode instructions of form:
14002 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14003 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
14006 neon_mixed_length (struct neon_type_el et
, unsigned size
)
14008 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14009 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14010 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14011 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14012 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14013 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14014 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
14015 inst
.instruction
|= neon_logbits (size
) << 20;
14017 neon_dp_fixup (&inst
);
14021 do_neon_dyadic_long (void)
14023 /* FIXME: Type checking for lengthening op. */
14024 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14025 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
14026 neon_mixed_length (et
, et
.size
);
14030 do_neon_abal (void)
14032 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14033 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
14034 neon_mixed_length (et
, et
.size
);
14038 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
14040 if (inst
.operands
[2].isscalar
)
14042 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
14043 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
14044 NEON_ENCODE (SCALAR
, inst
);
14045 neon_mul_mac (et
, et
.type
== NT_unsigned
);
14049 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14050 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
14051 NEON_ENCODE (INTEGER
, inst
);
14052 neon_mixed_length (et
, et
.size
);
14057 do_neon_mac_maybe_scalar_long (void)
14059 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
14063 do_neon_dyadic_wide (void)
14065 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
14066 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14067 neon_mixed_length (et
, et
.size
);
14071 do_neon_dyadic_narrow (void)
14073 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14074 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
14075 /* Operand sign is unimportant, and the U bit is part of the opcode,
14076 so force the operand type to integer. */
14077 et
.type
= NT_integer
;
14078 neon_mixed_length (et
, et
.size
/ 2);
14082 do_neon_mul_sat_scalar_long (void)
14084 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
14088 do_neon_vmull (void)
14090 if (inst
.operands
[2].isscalar
)
14091 do_neon_mac_maybe_scalar_long ();
14094 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
14095 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_KEY
);
14096 if (et
.type
== NT_poly
)
14097 NEON_ENCODE (POLY
, inst
);
14099 NEON_ENCODE (INTEGER
, inst
);
14100 /* For polynomial encoding, size field must be 0b00 and the U bit must be
14101 zero. Should be OK as-is. */
14102 neon_mixed_length (et
, et
.size
);
14109 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
14110 struct neon_type_el et
= neon_check_type (3, rs
,
14111 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
14112 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
14114 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
14115 _("shift out of range"));
14116 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14117 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14118 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14119 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14120 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14121 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14122 inst
.instruction
|= neon_quad (rs
) << 6;
14123 inst
.instruction
|= imm
<< 8;
14125 neon_dp_fixup (&inst
);
14131 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14132 struct neon_type_el et
= neon_check_type (2, rs
,
14133 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14134 unsigned op
= (inst
.instruction
>> 7) & 3;
14135 /* N (width of reversed regions) is encoded as part of the bitmask. We
14136 extract it here to check the elements to be reversed are smaller.
14137 Otherwise we'd get a reserved instruction. */
14138 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
14139 gas_assert (elsize
!= 0);
14140 constraint (et
.size
>= elsize
,
14141 _("elements must be smaller than reversal region"));
14142 neon_two_same (neon_quad (rs
), 1, et
.size
);
14148 if (inst
.operands
[1].isscalar
)
14150 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
14151 struct neon_type_el et
= neon_check_type (2, rs
,
14152 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14153 unsigned sizebits
= et
.size
>> 3;
14154 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14155 int logsize
= neon_logbits (et
.size
);
14156 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
14158 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
14161 NEON_ENCODE (SCALAR
, inst
);
14162 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14163 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14164 inst
.instruction
|= LOW4 (dm
);
14165 inst
.instruction
|= HI1 (dm
) << 5;
14166 inst
.instruction
|= neon_quad (rs
) << 6;
14167 inst
.instruction
|= x
<< 17;
14168 inst
.instruction
|= sizebits
<< 16;
14170 neon_dp_fixup (&inst
);
14174 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
14175 struct neon_type_el et
= neon_check_type (2, rs
,
14176 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14177 /* Duplicate ARM register to lanes of vector. */
14178 NEON_ENCODE (ARMREG
, inst
);
14181 case 8: inst
.instruction
|= 0x400000; break;
14182 case 16: inst
.instruction
|= 0x000020; break;
14183 case 32: inst
.instruction
|= 0x000000; break;
14186 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
14187 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
14188 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
14189 inst
.instruction
|= neon_quad (rs
) << 21;
14190 /* The encoding for this instruction is identical for the ARM and Thumb
14191 variants, except for the condition field. */
14192 do_vfp_cond_or_thumb ();
14196 /* VMOV has particularly many variations. It can be one of:
14197 0. VMOV<c><q> <Qd>, <Qm>
14198 1. VMOV<c><q> <Dd>, <Dm>
14199 (Register operations, which are VORR with Rm = Rn.)
14200 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14201 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14203 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14204 (ARM register to scalar.)
14205 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14206 (Two ARM registers to vector.)
14207 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14208 (Scalar to ARM register.)
14209 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14210 (Vector to two ARM registers.)
14211 8. VMOV.F32 <Sd>, <Sm>
14212 9. VMOV.F64 <Dd>, <Dm>
14213 (VFP register moves.)
14214 10. VMOV.F32 <Sd>, #imm
14215 11. VMOV.F64 <Dd>, #imm
14216 (VFP float immediate load.)
14217 12. VMOV <Rd>, <Sm>
14218 (VFP single to ARM reg.)
14219 13. VMOV <Sd>, <Rm>
14220 (ARM reg to VFP single.)
14221 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14222 (Two ARM regs to two VFP singles.)
14223 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14224 (Two VFP singles to two ARM regs.)
14226 These cases can be disambiguated using neon_select_shape, except cases 1/9
14227 and 3/11 which depend on the operand type too.
14229 All the encoded bits are hardcoded by this function.
14231 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14232 Cases 5, 7 may be used with VFPv2 and above.
14234 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14235 can specify a type where it doesn't make sense to, and is ignored). */
14240 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
14241 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
, NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
14243 struct neon_type_el et
;
14244 const char *ldconst
= 0;
14248 case NS_DD
: /* case 1/9. */
14249 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14250 /* It is not an error here if no type is given. */
14252 if (et
.type
== NT_float
&& et
.size
== 64)
14254 do_vfp_nsyn_opcode ("fcpyd");
14257 /* fall through. */
14259 case NS_QQ
: /* case 0/1. */
14261 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14263 /* The architecture manual I have doesn't explicitly state which
14264 value the U bit should have for register->register moves, but
14265 the equivalent VORR instruction has U = 0, so do that. */
14266 inst
.instruction
= 0x0200110;
14267 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
14268 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
14269 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
14270 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
14271 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
14272 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
14273 inst
.instruction
|= neon_quad (rs
) << 6;
14275 neon_dp_fixup (&inst
);
14279 case NS_DI
: /* case 3/11. */
14280 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
14282 if (et
.type
== NT_float
&& et
.size
== 64)
14284 /* case 11 (fconstd). */
14285 ldconst
= "fconstd";
14286 goto encode_fconstd
;
14288 /* fall through. */
14290 case NS_QI
: /* case 2/3. */
14291 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
14293 inst
.instruction
= 0x0800010;
14294 neon_move_immediate ();
14295 neon_dp_fixup (&inst
);
14298 case NS_SR
: /* case 4. */
14300 unsigned bcdebits
= 0;
14302 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
14303 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
14305 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
14306 logsize
= neon_logbits (et
.size
);
14308 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14310 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14311 && et
.size
!= 32, _(BAD_FPU
));
14312 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14313 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14317 case 8: bcdebits
= 0x8; break;
14318 case 16: bcdebits
= 0x1; break;
14319 case 32: bcdebits
= 0x0; break;
14323 bcdebits
|= x
<< logsize
;
14325 inst
.instruction
= 0xe000b10;
14326 do_vfp_cond_or_thumb ();
14327 inst
.instruction
|= LOW4 (dn
) << 16;
14328 inst
.instruction
|= HI1 (dn
) << 7;
14329 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14330 inst
.instruction
|= (bcdebits
& 3) << 5;
14331 inst
.instruction
|= (bcdebits
>> 2) << 21;
14335 case NS_DRR
: /* case 5 (fmdrr). */
14336 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14339 inst
.instruction
= 0xc400b10;
14340 do_vfp_cond_or_thumb ();
14341 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
14342 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
14343 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
14344 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
14347 case NS_RS
: /* case 6. */
14350 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
14351 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
14352 unsigned abcdebits
= 0;
14354 et
= neon_check_type (2, NS_NULL
,
14355 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
14356 logsize
= neon_logbits (et
.size
);
14358 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
14360 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
14361 && et
.size
!= 32, _(BAD_FPU
));
14362 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
14363 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
14367 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
14368 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
14369 case 32: abcdebits
= 0x00; break;
14373 abcdebits
|= x
<< logsize
;
14374 inst
.instruction
= 0xe100b10;
14375 do_vfp_cond_or_thumb ();
14376 inst
.instruction
|= LOW4 (dn
) << 16;
14377 inst
.instruction
|= HI1 (dn
) << 7;
14378 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14379 inst
.instruction
|= (abcdebits
& 3) << 5;
14380 inst
.instruction
|= (abcdebits
>> 2) << 21;
14384 case NS_RRD
: /* case 7 (fmrrd). */
14385 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
14388 inst
.instruction
= 0xc500b10;
14389 do_vfp_cond_or_thumb ();
14390 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
14391 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
14392 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
14393 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
14396 case NS_FF
: /* case 8 (fcpys). */
14397 do_vfp_nsyn_opcode ("fcpys");
14400 case NS_FI
: /* case 10 (fconsts). */
14401 ldconst
= "fconsts";
14403 if (is_quarter_float (inst
.operands
[1].imm
))
14405 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
14406 do_vfp_nsyn_opcode (ldconst
);
14409 first_error (_("immediate out of range"));
14412 case NS_RF
: /* case 12 (fmrs). */
14413 do_vfp_nsyn_opcode ("fmrs");
14416 case NS_FR
: /* case 13 (fmsr). */
14417 do_vfp_nsyn_opcode ("fmsr");
14420 /* The encoders for the fmrrs and fmsrr instructions expect three operands
14421 (one of which is a list), but we have parsed four. Do some fiddling to
14422 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
14424 case NS_RRFF
: /* case 14 (fmrrs). */
14425 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
14426 _("VFP registers must be adjacent"));
14427 inst
.operands
[2].imm
= 2;
14428 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14429 do_vfp_nsyn_opcode ("fmrrs");
14432 case NS_FFRR
: /* case 15 (fmsrr). */
14433 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
14434 _("VFP registers must be adjacent"));
14435 inst
.operands
[1] = inst
.operands
[2];
14436 inst
.operands
[2] = inst
.operands
[3];
14437 inst
.operands
[0].imm
= 2;
14438 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
14439 do_vfp_nsyn_opcode ("fmsrr");
14448 do_neon_rshift_round_imm (void)
14450 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
14451 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
14452 int imm
= inst
.operands
[2].imm
;
14454 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
14457 inst
.operands
[2].present
= 0;
14462 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
14463 _("immediate out of range for shift"));
14464 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
14469 do_neon_movl (void)
14471 struct neon_type_el et
= neon_check_type (2, NS_QD
,
14472 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
14473 unsigned sizebits
= et
.size
>> 3;
14474 inst
.instruction
|= sizebits
<< 19;
14475 neon_two_same (0, et
.type
== NT_unsigned
, -1);
14481 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14482 struct neon_type_el et
= neon_check_type (2, rs
,
14483 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14484 NEON_ENCODE (INTEGER
, inst
);
14485 neon_two_same (neon_quad (rs
), 1, et
.size
);
14489 do_neon_zip_uzp (void)
14491 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14492 struct neon_type_el et
= neon_check_type (2, rs
,
14493 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
14494 if (rs
== NS_DD
&& et
.size
== 32)
14496 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
14497 inst
.instruction
= N_MNEM_vtrn
;
14501 neon_two_same (neon_quad (rs
), 1, et
.size
);
14505 do_neon_sat_abs_neg (void)
14507 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14508 struct neon_type_el et
= neon_check_type (2, rs
,
14509 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
14510 neon_two_same (neon_quad (rs
), 1, et
.size
);
14514 do_neon_pair_long (void)
14516 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14517 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
14518   /* Unsigned is encoded in OP field (bit 7) for these instructions.  */
14519 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
14520 neon_two_same (neon_quad (rs
), 1, et
.size
);
14524 do_neon_recip_est (void)
14526 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
14527 struct neon_type_el et
= neon_check_type (2, rs
,
14528 N_EQK
| N_FLT
, N_F32
| N_U32
| N_KEY
);
14529 inst
.instruction
|= (et
.type
== NT_float
) << 8;
14530 neon_two_same (neon_quad (rs
), 1, et
.size
);
static void
do_neon_cls (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_clz (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_cnt (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_INT, N_8 | N_KEY);
  neon_two_same (neon_quad (rs), 1, et.size);
}

static void
do_neon_swp (void)
{
  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  neon_two_same (neon_quad (rs), 1, -1);
}
static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
static void
do_neon_ldm_stm (void)
{
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
static void
do_neon_ldr_str (void)
{
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
14642 /* "interleave" version also handles non-interleaving register VLD1/VST1
14646 do_neon_ld_st_interleave (void)
14648 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
14649 N_8
| N_16
| N_32
| N_64
);
14650 unsigned alignbits
= 0;
14652 /* The bits in this table go:
14653 0: register stride of one (0) or two (1)
14654 1,2: register list length, minus one (1, 2, 3, 4).
14655 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
14656 We use -1 for invalid entries. */
14657 const int typetable
[] =
14659 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
14660 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
14661 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
14662 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
14666 if (et
.type
== NT_invtype
)
14669 if (inst
.operands
[1].immisalign
)
14670 switch (inst
.operands
[1].imm
>> 8)
14672 case 64: alignbits
= 1; break;
14674 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
14675 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
14676 goto bad_alignment
;
14680 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
14681 goto bad_alignment
;
14686 first_error (_("bad alignment"));
14690 inst
.instruction
|= alignbits
<< 4;
14691 inst
.instruction
|= neon_logbits (et
.size
) << 6;
14693 /* Bits [4:6] of the immediate in a list specifier encode register stride
14694 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
14695 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
14696 up the right value for "type" in a table based on this value and the given
14697 list style, then stick it back. */
14698 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
14699 | (((inst
.instruction
>> 8) & 3) << 3);
14701 typebits
= typetable
[idx
];
14703 constraint (typebits
== -1, _("bad list type for instruction"));
14705 inst
.instruction
&= ~0xf00;
14706 inst
.instruction
|= typebits
<< 8;
/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
   otherwise.  The variable arguments are a list of pairs of legal (size, align)
   values, terminated with -1.  */

static int
neon_alignment_bit (int size, int align, int *do_align, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  if (!inst.operands[1].immisalign)
    {
      *do_align = 0;
      return SUCCESS;
    }

  va_start (ap, do_align);

  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_align = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
				       32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_align)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
/* Encode single n-element structure to all lanes VLD<n> instructions.  */

static void
do_neon_ld_dup (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_align = 0;

  if (et.type == NT_invtype)
    return;

  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_align, 8, 16, 16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;
    }

  inst.instruction |= do_align << 4;
}
/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
   apart from bits [11:4]).  */

static void
do_neon_ldx_stx (void)
{
  if (inst.operands[1].isreg)
    constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  switch (NEON_LANE (inst.operands[0].imm))
    {
    case NEON_INTERLEAVE_LANES:
      NEON_ENCODE (INTERLV, inst);
      do_neon_ld_st_interleave ();
      break;

    case NEON_ALL_LANES:
      NEON_ENCODE (DUP, inst);
      do_neon_ld_dup ();
      break;

    default:
      NEON_ENCODE (LANE, inst);
      do_neon_ld_st_lane ();
    }

  /* L bit comes from bit mask.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].reg << 16;

  if (inst.operands[1].postind)
    {
      int postreg = inst.operands[1].imm & 0xf;
      constraint (!inst.operands[1].immisreg,
		  _("post-index must be a register"));
      constraint (postreg == 0xd || postreg == 0xf,
		  _("bad register for post-index"));
      inst.instruction |= postreg;
    }
  else if (inst.operands[1].writeback)
    inst.instruction |= 0xd;
  else
    inst.instruction |= 0xf;

  if (thumb_mode)
    inst.instruction |= 0xf9000000;
  else
    inst.instruction |= 0xf4000000;
}
/* Overall per-instruction processing.  */

/* We need to be able to fix up arbitrary expressions in some statements.
   This is so that we can handle symbols that are an arbitrary distance from
   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
   which returns part of an address in a form which will be valid for
   a data instruction.  We do this by pushing the expression into a symbol
   in the expr_section, and creating a fix for that.  */

static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  switch (inst.reloc.exp.X_op)
    {
    case O_symbol:
      sym = inst.reloc.exp.X_add_symbol;
      offset = inst.reloc.exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.reloc.exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.reloc.exp);
      offset = 0;
      break;
    }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
/* Write a 32-bit thumb instruction to buf.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
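
/* Illustrative sketch (not built): put_thumb32_insn above emits a Thumb-2
   encoding as two consecutive 16-bit halfwords, most-significant halfword
   first.  The standalone demo below, with a hypothetical main, shows the
   split for the architectural Thumb-2 NOP.W encoding 0xf3af8000.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long insn = 0xf3af8000ul;	/* Thumb-2 NOP.W.  */
  unsigned hw1 = (insn >> 16) & 0xffff;	/* Written first: 0xf3af.  */
  unsigned hw2 = insn & 0xffff;		/* Written second: 0x8000.  */

  printf ("%04x %04x\n", hw1, hw2);
  return 0;
}
#endif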
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }

  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }

  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  if (inst.reloc.type != BFD_RELOC_UNUSED)
    fix_new_arm (frag_now, to - frag_now->fr_literal,
		 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
		 inst.reloc.type);

  dwarf2_emit_insn (inst.size);
}
static char *
output_it_inst (int cond, int mask, char * to)
{
  unsigned long instruction = 0xbf00;

  instruction |= mask;
  instruction |= cond << 4;

  if (to == NULL)
    {
      to = frag_more (2);
      dwarf2_emit_insn (2);
    }

  md_number_to_chars (to, instruction, 2);

  return to;
}
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
/* Subroutine of md_assemble, responsible for looking up the primary
   opcode from the mnemonic the user wrote.  STR points to the
   beginning of the mnemonic.

   This is not simply a hash table lookup, because of conditional
   variants.  Most instructions have conditional variants, which are
   expressed with a _conditional affix_ to the mnemonic.  If we were
   to encode each conditional variant as a literal string in the opcode
   table, it would have approximately 20,000 entries.

   Most mnemonics take this affix as a suffix, and in unified syntax,
   'most' is upgraded to 'all'.  However, in the divided syntax, some
   instructions take the affix as an infix, notably the s-variants of
   the arithmetic instructions.  Of those instructions, all but six
   have the infix appear after the third character of the mnemonic.

   Accordingly, the algorithm for looking up primary opcodes given
   an identifier is:

   1. Look up the identifier in the opcode table.
      If we find a match, go to step U.

   2. Look up the last two characters of the identifier in the
      conditions table.  If we find a match, look up the first N-2
      characters of the identifier in the opcode table.  If we
      find a match, go to step CE.

   3. Look up the fourth and fifth characters of the identifier in
      the conditions table.  If we find a match, extract those
      characters from the identifier, and look up the remaining
      characters in the opcode table.  If we find a match, go
      to step CM.

   4. Fail.

   U. Examine the tag field of the opcode structure, in case this is
      one of the six instructions with its conditional infix in an
      unusual place.  If it is, the tag tells us where to find the
      infix; look it up in the conditions table and set inst.cond
      accordingly.  Otherwise, this is an unconditional instruction.
      Again set inst.cond accordingly.  Return the opcode structure.

   CE. Examine the tag field to make sure this is an instruction that
      should receive a conditional suffix.  If it is not, fail.
      Otherwise, set inst.cond from the suffix we already looked up,
      and return the opcode structure.

   CM. Examine the tag field to make sure this is an instruction that
      should receive a conditional infix after the third character.
      If it is not, fail.  Otherwise, undo the edits to the current
      line of input and proceed as for case CE.  */
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    end - base);
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_warn (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }

  /* Cannot have a conditional suffix on a mnemonic of less than two
     characters.  */
  if (end - base < 3)
    return NULL;

  /* Look for suffixed mnemonic.  */
  affix = end - 2;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    affix - base);
  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return 0;
	  /* else fall through */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
						    (end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* step CM */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_warn (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
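
/* Illustrative sketch (not built): a minimal standalone model of step 2 of
   the lookup algorithm above -- strip the last two characters, see whether
   they name a condition, and retry the bare mnemonic.  The tiny tables and
   helper names here are hypothetical and only hint at what arm_ops_hsh and
   arm_cond_hsh provide.  */
#if 0
#include <stdio.h>
#include <string.h>

static const char *mini_ops[] = { "add", "sub", "ldr", NULL };
static const char *mini_conds[] = { "eq", "ne", "gt", "le", NULL };

static int
in_list (const char *const *list, const char *s, size_t len)
{
  for (; *list; list++)
    if (strlen (*list) == len && strncmp (*list, s, len) == 0)
      return 1;
  return 0;
}

int
main (void)
{
  const char *mnem = "addeq";
  size_t len = strlen (mnem);

  if (in_list (mini_ops, mnem, len))
    printf ("%s: unconditional match (step U)\n", mnem);
  else if (len > 2
	   && in_list (mini_conds, mnem + len - 2, 2)
	   && in_list (mini_ops, mnem, len - 2))
    printf ("%s: base \"%.*s\" + suffix \"%s\" (step CE)\n",
	    mnem, (int) (len - 2), mnem, mnem + len - 2);
  else
    printf ("%s: no match\n", mnem);
  return 0;
}
#endif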
/* This function generates an initial IT instruction, leaving its block
   virtually open for the new instructions.  Eventually,
   the mask will be updated by now_it_add_mask () each time
   a new instruction needs to be included in the IT block.
   Finally, the block is closed with close_automatic_it_block ().
   The block closure can be requested either from md_assemble (),
   a tencode (), or due to a label hook.  */

static void
new_automatic_it_block (int cond)
{
  now_it.state = AUTOMATIC_IT_BLOCK;
  now_it.mask = 0x18;
  now_it.cc = cond;
  now_it.block_length = 1;
  mapping_state (MAP_THUMB);
  now_it.insn = output_it_inst (cond, now_it.mask, NULL);
}
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  now_it.mask = 0x10;
  now_it.block_length = 0;
}
/* Update the mask of the current automatically-generated IT
   instruction.  See comments in new_automatic_it_block ().  */

static void
now_it_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					       | ((bitvalue) << (nbit)))
  const int resulting_bit = (cond & 1);

  now_it.mask &= 0xf;
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       resulting_bit,
			       (5 - now_it.block_length));
  now_it.mask = SET_BIT_VALUE (now_it.mask,
			       1,
			       ((5 - now_it.block_length) - 1));
  output_it_inst (now_it.cc, now_it.mask, now_it.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
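
/* Illustrative sketch (not built): the architectural IT encoding that the
   helpers above build up incrementally.  A standalone, hypothetical helper:
   given the 4-bit first condition and the Then/Else pattern of the
   *following* instructions (e.g. "" for IT, "TE" for ITTE), left-justify the
   pattern bits in the 4-bit mask and append the terminating 1 -- the same
   shape that output_it_inst () ORs into 0xbf00.  */
#if 0
#include <stdio.h>
#include <string.h>

static unsigned
it_encoding (unsigned firstcond, const char *pattern)
{
  unsigned mask = 0;
  size_t i, n = strlen (pattern);	/* 0..3 extra instructions.  */

  for (i = 0; i < n; i++)
    mask = (mask << 1) | ((pattern[i] == 'T') ? (firstcond & 1)
					      : !(firstcond & 1));
  mask = (mask << 1) | 1;		/* Terminating 1.  */
  mask <<= 3 - n;			/* Left-justify in bits [3:0].  */

  return 0xbf00 | (firstcond << 4) | mask;
}

int
main (void)
{
  /* ITTE EQ (cond 0x0) encodes as 0xbf06.  */
  printf ("0x%04x\n", it_encoding (0x0, "TE"));
  return 0;
}
#endif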
/* The IT blocks handling machinery is accessed through these functions:
     it_fsm_pre_encode ()               from md_assemble ()
     set_it_insn_type ()                optional, from the tencode functions
     set_it_insn_type_last ()           ditto
     in_it_block ()                     ditto
     it_fsm_post_encode ()              from md_assemble ()
     force_automatic_it_block_close ()  from label handling functions

   Rationale:
     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
	initializing the IT insn type with a generic initial value depending
	on the inst.condition.
     2) During the tencode function, two things may happen:
	a) The tencode function overrides the IT insn type by
	   calling either set_it_insn_type (type) or set_it_insn_type_last ().
	b) The tencode function queries the IT block state by
	   calling in_it_block () (i.e. to determine narrow/not narrow mode).

	Both set_it_insn_type and in_it_block run the internal FSM state
	handling function (handle_it_state), because: a) setting the IT insn
	type may incur in an invalid state (exiting the function),
	and b) querying the state requires the FSM to be updated.
	Specifically we want to avoid creating an IT block for conditional
	branches, so it_fsm_pre_encode is actually a guess and we can't
	determine whether an IT block is required until the tencode () routine
	has decided what type of instruction this actually is.
	Because of this, if set_it_insn_type and in_it_block have to be used,
	set_it_insn_type has to be called first.

	set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
	determines the insn IT type depending on the inst.cond code.
	When a tencode () routine encodes an instruction that can be
	either outside an IT block, or, in the case of being inside, has to be
	the last one, set_it_insn_type_last () will determine the proper
	IT instruction type based on the inst.cond code.  Otherwise,
	set_it_insn_type can be called for overriding that logic or
	for covering other cases.

	Calling handle_it_state () may not transition the IT block state to
	OUTSIDE_IT_BLOCK immediately, since the (current) state could be
	still queried.  Instead, if the FSM determines that the state should
	be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
	after the tencode () function: that's what it_fsm_post_encode () does.

	Since in_it_block () calls the state handling function to get an
	updated state, an error may occur (due to invalid insns combination).
	In that case, inst.error is set.
	Therefore, inst.error has to be checked after the execution of
	the tencode () routine.

     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
	any pending state change (if any) that didn't take place in
	handle_it_state () as explained above.  */
static void
it_fsm_pre_encode (void)
{
  if (inst.cond != COND_ALWAYS)
    inst.it_insn_type = INSIDE_IT_INSN;
  else
    inst.it_insn_type = OUTSIDE_IT_INSN;

  now_it.state_handled = 0;
}
/* IT state FSM handling function.  */

static int
handle_it_state (void)
{
  now_it.state_handled = 1;

  switch (now_it.state)
    {
    case OUTSIDE_IT_BLOCK:
      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (thumb_mode == 0)
	    {
	      if (unified_syntax
		  && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		as_tsktsk (_("Warning: conditional outside an IT block"\
			     " for Thumb."));
	    }
	  else
	    {
	      if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
		{
		  /* Automatically generate the IT instruction.  */
		  new_automatic_it_block (inst.cond);
		  if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
		    close_automatic_it_block ();
		}
	      else
		{
		  inst.error = BAD_OUT_IT;
		  return FAIL;
		}
	    }
	  break;

	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case IT_INSN:
	  now_it.state = MANUAL_IT_BLOCK;
	  now_it.block_length = 0;
	  break;
	}
      break;

    case AUTOMATIC_IT_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	    to incompatible conditions or
	    4 insns-length block reached).  */

      switch (inst.it_insn_type)
	{
	case OUTSIDE_IT_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_it_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4
	      || !now_it_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    now_it_add_mask (inst.cond);

	  if (now_it.state == AUTOMATIC_IT_BLOCK
	      && (inst.it_insn_type == INSIDE_IT_LAST_INSN
		  || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_it.block_length++;

	  if (now_it.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_it_add_mask (now_it.cc & 1);
	  break;

	case IT_INSN:
	  close_automatic_it_block ();
	  now_it.state = MANUAL_IT_BLOCK;
	  break;
	}
      break;

    case MANUAL_IT_BLOCK:
      {
	/* Check conditional suffixes.  */
	const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
	int is_last;

	now_it.mask <<= 1;
	now_it.mask &= 0x1f;
	is_last = (now_it.mask == 0x10);

	switch (inst.it_insn_type)
	  {
	  case OUTSIDE_IT_INSN:
	    inst.error = BAD_NOT_IT;
	    return FAIL;

	  case INSIDE_IT_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    break;

	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in an IT block.  */
	    break;

	  case IT_INSN:
	    inst.error = BAD_IT_IT;
	    return FAIL;
	  }
      }
      break;
    }

  return SUCCESS;
}
static void
it_fsm_post_encode (void)
{
  int is_last;

  if (!now_it.state_handled)
    handle_it_state ();

  is_last = (now_it.mask == 0x10);
  if (is_last)
    now_it.state = OUTSIDE_IT_BLOCK;
}
static void
force_automatic_it_block_close (void)
{
  if (now_it.state == AUTOMATIC_IT_BLOCK)
    {
      close_automatic_it_block ();
      now_it.state = OUTSIDE_IT_BLOCK;
    }
}

static int
in_it_block (void)
{
  if (!now_it.state_handled)
    handle_it_state ();

  return now_it.state != OUTSIDE_IT_BLOCK;
}
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  memset (&inst, '\0', sizeof (inst));
  inst.reloc.type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_warn (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  as_bad (_("selected processor does not support `%s'"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
	{
	  if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
	      && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
		   || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
	    {
	      /* Two things are addressed here.
		 1) Implicitly require narrow instructions on Thumb-1.
		    This avoids relaxation accidentally introducing Thumb-2
		    instructions.
		 2) Reject wide instructions in non Thumb-2 cores.  */
	      if (inst.size_req == 0)
		inst.size_req = 2;
	      else if (inst.size_req == 4)
		{
		  as_bad (_("selected processor does not support `%s'"), str);
		  return;
		}
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the it_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  ie.
	 anything other than bl/blx and v6-M instructions.
	 This is overly pessimistic for relaxable instructions.  */
      if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
	   || inst.relax)
	  && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
	       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s'"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xF << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
    }
  output_inst (str);
}
void
check_it_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_it.state
	== MANUAL_IT_BLOCK)
      as_warn (_("section '%s' finished with an open IT block."),
	       sect->name);
#else
  if (now_it.state == MANUAL_IT_BLOCK)
    as_warn (_("file finished with an open IT block."));
#endif
}
/* Various frobbings of labels and their addresses.  */

void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.  Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
int
arm_data_in_code (void)
{
  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
    {
      *input_line_pointer = '/';
      input_line_pointer += 5;
      *input_line_pointer = 0;
      return 1;
    }

  return 0;
}
char *
arm_canonicalize_symbol_name (char * name)
{
  int len;

  if (thumb_mode && (len = strlen (name)) > 5
      && streq (name + len - 5, "/data"))
    *(name + len - 5) = 0;

  return name;
}
/* Table of all register names defined by default.  The user can
   define additional names with .req.  Note that all register names
   should appear in both upper and lowercase variants.  Some registers
   also have mixed-case names.  */

#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
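
/* For reference, REGDEF(lr,14,RN) expands to { "lr", 14, REG_TYPE_RN, TRUE, 0 }
   and REGSET(r, RN) therefore defines the sixteen entries r0..r15 in one go;
   the register picked for this worked example is only illustrative.  */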
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.  */
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.  */
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.  */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,  0,MMXWC),  REGDEF(wCID,  0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,  1,MMXWC),  REGDEF(wCon,  1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},

  /* Individual flags.  */
  {"f",    PSR_f},
  {"c",    PSR_c},
  {"x",    PSR_x},
  {"s",    PSR_s},

  /* Combinations of flags.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
/* Table of V7M psr names.  */
static const struct asm_psr v7m_psrs[] =
{
  {"apsr",        0 }, {"APSR",         0 },
  {"iapsr",       1 }, {"IAPSR",        1 },
  {"eapsr",       2 }, {"EAPSR",        2 },
  {"psr",         3 }, {"PSR",          3 },
  {"xpsr",        3 }, {"XPSR",         3 }, {"xPSR", 3 },
  {"ipsr",        5 }, {"IPSR",         5 },
  {"epsr",        6 }, {"EPSR",         6 },
  {"iepsr",       7 }, {"IEPSR",        7 },
  {"msp",         8 }, {"MSP",          8 },
  {"psp",         9 }, {"PSP",          9 },
  {"primask",    16 }, {"PRIMASK",     16 },
  {"basepri",    17 }, {"BASEPRI",     17 },
  {"basepri_max",18 }, {"BASEPRI_MAX", 18 },
  {"faultmask",  19 }, {"FAULTMASK",   19 },
  {"control",    20 }, {"CONTROL",     20 }
/* Table of all shift-in-operand names.  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },  { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },  { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },  { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },  { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },  { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },  { "RRX", SHIFT_RRX }
/* Table of all explicit relocation names.  */

static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },  { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },  { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },  { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },  { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },  { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },  { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL}
/* Table of all conditional affixes.  0xF is not defined as a condition code.  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};

static struct asm_barrier_opt barrier_opt_names[] =

/* Table of ARM-format instructions.  */
16235 /* Table of ARM-format instructions. */
16237 /* Macros for gluing together operand strings. N.B. In all cases
16238 other than OPS0, the trailing OP_stop comes from default
16239 zero-initialization of the unspecified elements of the array. */
16240 #define OPS0() { OP_stop, }
16241 #define OPS1(a) { OP_##a, }
16242 #define OPS2(a,b) { OP_##a,OP_##b, }
16243 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
16244 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
16245 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
16246 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
16248 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
16249 This is useful when mixing operands for ARM and THUMB, i.e. using the
16250 MIX_ARM_THUMB_OPERANDS macro.
16251 In order to use these macros, prefix the number of operands with _
16253 #define OPS_1(a) { a, }
16254 #define OPS_2(a,b) { a,b, }
16255 #define OPS_3(a,b,c) { a,b,c, }
16256 #define OPS_4(a,b,c,d) { a,b,c,d, }
16257 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
16258 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
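
/* For reference, OPS3 (RR, oRR, SH) expands to { OP_RR, OP_oRR, OP_SH, }, and
   the remaining (unspecified) slots of the operand array zero-initialize to
   OP_stop as described above; the operand names in this worked example are
   just an illustrative pick from the existing parsers.  */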
16260 /* These macros abstract out the exact format of the mnemonic table and
16261 save some repeated characters. */
16263 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
16264 #define TxCE(mnem, op, top, nops, ops, ae, te) \
16265 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
16266 THUMB_VARIANT, do_##ae, do_##te }
16268 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
16269 a T_MNEM_xyz enumerator. */
16270 #define TCE(mnem, aop, top, nops, ops, ae, te) \
16271 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
16272 #define tCE(mnem, aop, top, nops, ops, ae, te) \
16273 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16275 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
16276 infix after the third character. */
16277 #define TxC3(mnem, op, top, nops, ops, ae, te) \
16278 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
16279 THUMB_VARIANT, do_##ae, do_##te }
16280 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
16281 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
16282 THUMB_VARIANT, do_##ae, do_##te }
16283 #define TC3(mnem, aop, top, nops, ops, ae, te) \
16284 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
16285 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
16286 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
16287 #define tC3(mnem, aop, top, nops, ops, ae, te) \
16288 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16289 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
16290 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
16292 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
16293 appear in the condition table. */
16294 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
16295 { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
16296 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
16298 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
16299 TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
16300 TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
16301 TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
16302 TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
16303 TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
16304 TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
16305 TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
16306 TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
16307 TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
16308 TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
16309 TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
16310 TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
16311 TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
16312 TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
16313 TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
16314 TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
16315 TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
16316 TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
16317 TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
16319 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
16320 TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
16321 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
16322 TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
16324 /* Mnemonic that cannot be conditionalized. The ARM condition-code
16325 field is still 0xE. Many of the Thumb variants can be executed
16326 conditionally, so this is checked separately. */
16327 #define TUE(mnem, op, top, nops, ops, ae, te) \
16328 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
16329 THUMB_VARIANT, do_##ae, do_##te }
16331 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
16332 condition code field. */
16333 #define TUF(mnem, op, top, nops, ops, ae, te) \
16334 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
16335 THUMB_VARIANT, do_##ae, do_##te }
16337 /* ARM-only variants of all the above. */
16338 #define CE(mnem, op, nops, ops, ae) \
16339 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16341 #define C3(mnem, op, nops, ops, ae) \
16342 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16344 /* Legacy mnemonics that always have conditional infix after the third
16346 #define CL(mnem, op, nops, ops, ae) \
16347 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
16348 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16350 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
16351 #define cCE(mnem, op, nops, ops, ae) \
16352 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16354 /* Legacy coprocessor instructions where conditional infix and conditional
16355 suffix are ambiguous. For consistency this includes all FPA instructions,
16356 not just the potentially ambiguous ones. */
16357 #define cCL(mnem, op, nops, ops, ae) \
16358 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
16359 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16361 /* Coprocessor, takes either a suffix or a position-3 infix
16362 (for an FPA corner case). */
16363 #define C3E(mnem, op, nops, ops, ae) \
16364 { mnem, OPS##nops ops, OT_csuf_or_in3, \
16365 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
16367 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
16368 { m1 #m2 m3, OPS##nops ops, \
16369 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
16370 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
16372 #define CM(m1, m2, op, nops, ops, ae) \
16373 xCM_ (m1, , m2, op, nops, ops, ae), \
16374 xCM_ (m1, eq, m2, op, nops, ops, ae), \
16375 xCM_ (m1, ne, m2, op, nops, ops, ae), \
16376 xCM_ (m1, cs, m2, op, nops, ops, ae), \
16377 xCM_ (m1, hs, m2, op, nops, ops, ae), \
16378 xCM_ (m1, cc, m2, op, nops, ops, ae), \
16379 xCM_ (m1, ul, m2, op, nops, ops, ae), \
16380 xCM_ (m1, lo, m2, op, nops, ops, ae), \
16381 xCM_ (m1, mi, m2, op, nops, ops, ae), \
16382 xCM_ (m1, pl, m2, op, nops, ops, ae), \
16383 xCM_ (m1, vs, m2, op, nops, ops, ae), \
16384 xCM_ (m1, vc, m2, op, nops, ops, ae), \
16385 xCM_ (m1, hi, m2, op, nops, ops, ae), \
16386 xCM_ (m1, ls, m2, op, nops, ops, ae), \
16387 xCM_ (m1, ge, m2, op, nops, ops, ae), \
16388 xCM_ (m1, lt, m2, op, nops, ops, ae), \
16389 xCM_ (m1, gt, m2, op, nops, ops, ae), \
16390 xCM_ (m1, le, m2, op, nops, ops, ae), \
16391 xCM_ (m1, al, m2, op, nops, ops, ae)
16393 #define UE(mnem, op, nops, ops, ae) \
16394 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16396 #define UF(mnem, op, nops, ops, ae) \
16397 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
16399 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
16400 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
16401 use the same encoding function for each. */
16402 #define NUF(mnem, op, nops, ops, enc) \
16403 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
16404 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16406 /* Neon data processing, version which indirects through neon_enc_tab for
16407 the various overloaded versions of opcodes. */
16408 #define nUF(mnem, op, nops, ops, enc) \
16409 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
16410 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16412 /* Neon insn with conditional suffix for the ARM version, non-overloaded
16414 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
16415 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
16416 THUMB_VARIANT, do_##enc, do_##enc }
16418 #define NCE(mnem, op, nops, ops, enc) \
16419 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
16421 #define NCEF(mnem, op, nops, ops, enc) \
16422 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
16424 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
16425 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
16426 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
16427 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
16429 #define nCE(mnem, op, nops, ops, enc) \
16430 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
16432 #define nCEF(mnem, op, nops, ops, enc) \
16433 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
/* Thumb-only, unconditional.  */
#define UT(mnem, op, nops, ops, te) TUE (mnem, 0, op, nops, ops, 0, te)

static const struct asm_opcode insns[] =
{
#define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
#define THUMB_VARIANT  & arm_ext_v4t
16444 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16445 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16446 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16447 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16448 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16449 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
16450 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16451 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
16452 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16453 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16454 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16455 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16456 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16457 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
16458 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
16459 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
    for setting PSR flag bits.  They are obsolete in V6 and do not
    have Thumb equivalents. */
 tCE("tst",   1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
  CL("tstp",  110f000,       2, (RR, SH), cmp),
 tCE("cmp",   1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
  CL("cmpp",  150f000,       2, (RR, SH), cmp),
 tCE("cmn",   1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
  CL("cmnp",  170f000,       2, (RR, SH), cmp),
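 /* Illustrative note (not in the original source): the trailing "p" in the
    tstp/cmpp/cmnp (and later teqp) entries encodes Rd == PC -- compare the
    extra "f" nibble in their .value fields above -- which on 26-bit ARMs
    updated the PSR directly from the ALU result.  */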
16474 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16475 tC3("movs", 1b00000
, _movs
, 2, (RR
, SH
), mov
, t_mov_cmp
),
16476 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16477 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
16479 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
16480 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
16481 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
16483 OP_ADDRGLDR
),ldst
, t_ldst
),
16484 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
16486 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16487 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16488 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16489 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16490 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16491 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
16493 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16494 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
16495 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
16496 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
16499 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
16500 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
16501 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
 /* Thumb-compatibility pseudo ops. */
 tCE("lsl",  1a00000, _lsl,  3, (RR, oRR, SH), shift, t_shift),
 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
 tCE("lsr",  1a00020, _lsr,  3, (RR, oRR, SH), shift, t_shift),
 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE("asr",  1a00040, _asr,  3, (RR, oRR, SH), shift, t_shift),
 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
 tCE("ror",  1a00060, _ror,  3, (RR, oRR, SH), shift, t_shift),
 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
 tCE("neg",  2600000, _neg,  2, (RR, RR), rd_rn, t_neg),
 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
 tCE("pop",  8bd0000, _pop,  1, (REGLST), push_pop, t_push_pop),
 /* These may simplify to neg. */
 TCE("rsb",  0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
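 /* Illustrative example (an assumption, not original text): when assembling
    for Thumb, "rsbs r0, r1, #0" can often be encoded as the 16-bit NEG form,
    which is the simplification the comment above refers to.  */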
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6

 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
 /* V1 instructions with no Thumb analogue prior to V6T2.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("teq",   1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
  CL("teqp",  130f000,           2, (RR, SH), cmp),

 TC3("ldrt",  4300000, f8500e00, 2, (RRnpc_npcsp, ADDR), ldstt, t_ldstt),
 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR), ldstt, t_ldstt),
 TC3("strt",  4200000, f8400e00, 2, (RR_npcsp, ADDR),    ldstt, t_ldstt),
 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR), ldstt, t_ldstt),

 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),

 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
 /* V1 instructions with no Thumb analogue at all.  */
  CE("rsc",  0e00000, 3, (RR, oRR, SH), arit),
  C3(rscs,   0f00000, 3, (RR, oRR, SH), arit),

  C3(stmib,  9800000, 2, (RRw, REGLST), ldmstm),
  C3(stmfa,  9800000, 2, (RRw, REGLST), ldmstm),
  C3(stmda,  8000000, 2, (RRw, REGLST), ldmstm),
  C3(stmed,  8000000, 2, (RRw, REGLST), ldmstm),
  C3(ldmib,  9900000, 2, (RRw, REGLST), ldmstm),
  C3(ldmed,  9900000, 2, (RRw, REGLST), ldmstm),
  C3(ldmda,  8100000, 2, (RRw, REGLST), ldmstm),
  C3(ldmfa,  8100000, 2, (RRw, REGLST), ldmstm),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v2 /* ARM 2 - multiplies.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v4t

 tCE("mul",  0000090, _mul,  3, (RRnpc, RRnpc, oRR), mul, t_mul),
 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("mla",  0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
  C3(mlas,   0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
 /* Generic coprocessor instructions.  */
 TCE("cdp",  e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TCE("ldc",  c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE("stc",  c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TCE("mcr",  e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TCE("mrc",  e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */

  CE("swp",  1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
  C3(swpb,   1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v3 /* ARM 6 Status register instructions.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_msr

 TCE("mrs", 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
 TCE("msr", 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v3m /* ARM 7M long multiplies.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("smull","s", 0d00090,       4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("umull","s", 0900090,       4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("smlal","s", 0f00090,       4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
  CM("umlal","s", 0b00090,       4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v4 /* ARM Architecture 4.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v4t

 tC3("ldrh",    01000b0, _ldrh,  2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("strh",    00000b0, _strh,  2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("ldrsh",   01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tC3("ldrsb",   01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tCM("ld","sh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
 tCM("ld","sb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v4t_5

 /* ARM Architecture 4T.  */
 /* Note: bx (and blx) are required on V5, even if the processor does
    not support Thumb.  */
 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v5 /* ARM Architecture 5T.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v5t

 /* Note: blx has 2 variants; the .value coded here is for
    BLX(2).  Only this variant has conditional execution.  */
 TCE("blx",  12fff30, 4780, 1, (RR_EXr),  blx,  t_blx),
 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
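 /* Illustrative note (an assumption, not original text): because only the
    register form BLX(2) takes a condition, "blxne r3" is accepted here,
    while the immediate form BLX(1) to a label is always unconditional
    (its ARM encoding uses the 0xf condition field).  */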
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("clz",   16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
 TUF("ldc2",  c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF("stc2",  c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
 TUF("cdp2",  e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
 TUF("mcr2",  e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
 TUF("mrc2",  e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v5exp /* ARM Architecture 5TExP.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v5exp

 TCE("smlabb",  1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlatb",  10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlabt",  10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlatt",  10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE("smlawb",  1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlawt",  12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),

 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),

 TCE("smulbb",  1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smultb",  16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smulbt",  16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smultt",  16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE("smulwb",  12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smulwt",  12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),

 TCE("qadd",    1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
 TCE("qdadd",   1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
 TCE("qsub",    1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
 TCE("qdsub",   1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v5e /* ARM Architecture 5TE.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TUF("pld",  450f000, f810f000, 1, (ADDR), pld, t_pld),
 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
					ldrd, t_ldstd),
 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
				    ADDRGLDRS), ldrd, t_ldstd),

 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v5j /* ARM Architecture 5TEJ.  */

 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v6 /* ARM V6.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6

 TUF("cpsie",  1080000, b660,   2, (CPSF, oI31b), cpsi, t_cpsi),
 TUF("cpsid",  10c0000, b670,   2, (CPSF, oI31b), cpsi, t_cpsi),
 tCE("rev",    6bf0f30, _rev,   2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE("rev16",  6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE("revsh",  6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
 tCE("sxth",   6bf0070, _sxth,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE("uxth",   6ff0070, _uxth,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE("sxtb",   6af0070, _sxtb,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 tCE("uxtb",   6ef0070, _uxtb,  3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TUF("setend", 1010000, b650,   1, (ENDI), setend, t_setend),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2

 TCE("ldrex",  1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
 TCE("strex",  1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
					strex, t_strex),
 TUF("mcrr2",  c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
 TUF("mrrc2",  c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),

 TCE("ssat",   6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar), ssat, t_ssat),
 TCE("usat",   6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar), usat, t_usat),
 /* ARM V6 not included in V7M.  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_notm
 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
  UF(rfeib,   9900a00,           1, (RRw), rfe),
  UF(rfeda,   8100a00,           1, (RRw), rfe),
 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
  UF(rfefa,   9900a00,           1, (RRw), rfe),
  UF(rfeea,   8100a00,           1, (RRw), rfe),
 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
  UF(srsib,   9c00500,           2, (oRRw, I31w), srs),
  UF(srsda,   8400500,           2, (oRRw, I31w), srs),
 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
 /* ARM V6 not included in V7M (eg. integer SIMD).  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_dsp
 TUF("cps",       1020000, f3af8100, 1, (I31b), imm0, t_cps),
 TCE("pkhbt",     6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
 TCE("pkhtb",     6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
 TCE("qadd16",    6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("qadd8",     6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("qasx",      6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QASX.  */
 TCE("qaddsubx",  6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("qsax",      6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for QSAX.  */
 TCE("qsubaddx",  6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("qsub16",    6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("qsub8",     6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("sadd16",    6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("sadd8",     6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("sasx",      6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SASX.  */
 TCE("saddsubx",  6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("shadd16",   6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("shadd8",    6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("shasx",     6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHASX.  */
 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SHSAX.  */
 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("shsub16",   6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("shsub8",    6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("ssax",      6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for SSAX.  */
 TCE("ssubaddx",  6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("ssub16",    6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("ssub8",     6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uadd16",    6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uadd8",     6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uasx",      6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UASX.  */
 TCE("uaddsubx",  6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uhadd16",   6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uhadd8",    6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uhasx",     6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHASX.  */
 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UHSAX.  */
 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uhsub16",   6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uhsub8",    6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uqadd16",   6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uqadd8",    6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uqasx",     6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQASX.  */
 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for UQSAX.  */
 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uqsub16",   6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("uqsub8",    6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("usub16",    6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("usax",      6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 /* Old name for USAX.  */
 TCE("usubaddx",  6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("usub8",     6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("sxtah",     6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("sxtab16",   6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("sxtab",     6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("sxtb16",    68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE("uxtah",     6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("uxtab16",   6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("uxtab",     6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
 TCE("uxtb16",    6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
 TCE("sel",       6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
 TCE("smlad",     7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smladx",    7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlald",    7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smlaldx",   7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smlsd",     7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlsdx",    7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smlsld",    7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smlsldx",   7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("smmla",     7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smmlar",    7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smmls",     75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smmlsr",    75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("smmul",     750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smmulr",    750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smuad",     700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smuadx",    700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smusd",     700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("smusdx",    700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("ssat16",    6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
 TCE("umaal",     0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
 TCE("usad8",     780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
 TCE("usada8",    7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
 TCE("usat16",    6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v6k
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6k

 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
 tCE("wfe",   320f002, _wfe,   0, (), noargs, t_hint),
 tCE("wfi",   320f003, _wfi,   0, (), noargs, t_hint),
 tCE("sev",   320f004, _sev,   0, (), noargs, t_hint),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6_notm
 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
					ldrexd, t_ldrexd),
 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
				      RRnpcb), strexd, t_strexd),
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v6t2
16863 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
16865 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
16867 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
16869 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
16871 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v6z

 TCE("smc",  1600070, f7f08000, 1, (EXPi), smc, t_smc),

#undef  ARM_VARIANT
#define ARM_VARIANT  & arm_ext_v6t2

 TCE("bfc",  7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
 TCE("bfi",  7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),

 TCE("mls",  0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),

 TC3("ldrht",  03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
 TC3("strht",  02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
16896 UT("cbnz", b900
, 2, (RR
, EXP
), t_cbz
),
16897 UT("cbz", b100
, 2, (RR
, EXP
), t_cbz
),
16899 /* ARM does not really have an IT instruction, so always allow it.
16900 The opcode is copied from Thumb in order to allow warnings in
16901 -mimplicit-it=[never | arm] modes. */
16903 #define ARM_VARIANT & arm_ext_v1
16905 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
16906 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
16907 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
16908 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
16909 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
16910 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
16911 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
16912 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
16913 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
16914 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
16915 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
16916 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
16917 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
16918 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
16919 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
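 /* Illustrative usage (an assumption, not original text): in Thumb code
      ite   eq
      moveq r0, #1
      movne r0, #0
    the "t"/"e" letters after "it" spell out which of the following
    instructions take the condition and which take its inverse, matching the
    mask encoded in the low bits of the opcodes above.  */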
 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
 TC3("rrx",  01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),

 /* Thumb2 only instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  NULL

 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
 TCE("orn",  0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
 TCE("orns", 0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
 TCE("tbb",  0, e8d0f000, 1, (TB), 0, t_tb),
 TCE("tbh",  0, e8d0f010, 1, (TB), 0, t_tb),

 /* Thumb-2 hardware division instructions (R and M profiles only).  */
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_div

 TCE("sdiv", 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
 TCE("udiv", 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
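 /* Illustrative note (an assumption, not original text): the 0 in the ARM
    .value column above means there is no ARM-state encoding; for example
    "addw r0, r1, #4095" exists only as the 32-bit Thumb encoding that takes
    a plain 12-bit immediate rather than a shifted constant.  */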
 /* ARM V6M/V7 instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_barrier
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_barrier

 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),

 /* ARM V7 instructions.  */
#undef  ARM_VARIANT
#define ARM_VARIANT    & arm_ext_v7
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & arm_ext_v7

 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
 TCE("dbg", 320f0f0, f3af80f0, 1, (I15),  dbg, t_dbg),
16962 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
16964 cCE("wfs", e200110
, 1, (RR
), rd
),
16965 cCE("rfs", e300110
, 1, (RR
), rd
),
16966 cCE("wfc", e400110
, 1, (RR
), rd
),
16967 cCE("rfc", e500110
, 1, (RR
), rd
),
16969 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16970 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16971 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16972 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16974 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16975 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16976 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16977 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
16979 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
16980 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
16981 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
16982 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
16983 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
16984 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
16985 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
16986 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
16987 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
16988 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
16989 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
16990 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
16992 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
16993 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
16994 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
16995 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
16996 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
16997 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
16998 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
16999 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
17000 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
17001 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
17002 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
17003 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
17005 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
17006 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
17007 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
17008 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
17009 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
17010 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
17011 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
17012 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
17013 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
17014 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
17015 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
17016 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
17018 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
17019 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
17020 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
17021 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
17022 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
17023 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
17024 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
17025 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
17026 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
17027 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
17028 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
17029 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
17031 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
17032 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
17033 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
17034 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
17035 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
17036 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
17037 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
17038 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
17039 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
17040 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
17041 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
17042 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
17044 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
17045 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
17046 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
17047 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
17048 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
17049 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
17050 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
17051 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
17052 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
17053 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
17054 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
17055 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
17057 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
17058 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
17059 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
17060 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
17061 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
17062 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
17063 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
17064 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
17065 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
17066 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
17067 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
17068 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
17070 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
17071 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
17072 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
17073 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
17074 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
17075 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
17076 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
17077 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
17078 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
17079 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
17080 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
17081 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
17083 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
17084 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
17085 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
17086 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
17087 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
17088 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
17089 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
17090 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
17091 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
17092 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
17093 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
17094 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
17096 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
17097 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
17098 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
17099 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
17100 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
17101 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
17102 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
17103 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
17104 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
17105 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
17106 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
17107 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
17109 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
17110 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
17111 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
17112 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
17113 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
17114 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
17115 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
17116 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
17117 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
17118 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
17119 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
17120 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
17122 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
17123 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
17124 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
17125 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
17126 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
17127 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
17128 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
17129 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
17130 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
17131 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
17132 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
17133 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
17135 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
17136 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
17137 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
17138 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
17139 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
17140 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
17141 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
17142 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
17143 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
17144 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
17145 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
17146 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
17148 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
17149 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
17150 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
17151 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
17152 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
17153 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
17154 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
17155 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
17156 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
17157 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
17158 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
17159 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
17161 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
17162 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
17163 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
17164 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
17165 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
17166 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
17167 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
17168 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
17169 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
17170 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
17171 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
17172 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
17174 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
17175 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
17176 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
17177 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
17178 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
17179 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
17180 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
17181 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
17182 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
17183 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
17184 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
17185 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
17187 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17188 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17189 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17190 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17191 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17192 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17193 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17194 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17195 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17196 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17197 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17198 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17200 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17201 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17202 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17203 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17204 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17205 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17206 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17207 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17208 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17209 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17210 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17211 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17213 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17214 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17215 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17216 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17217 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17218 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17219 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17220 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17221 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17222 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17223 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17224 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17226 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17227 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17228 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17229 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17230 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17231 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17232 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17233 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17234 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17235 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17236 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17237 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17239 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17240 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17241 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17242 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17243 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17244 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17245 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17246 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17247 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17248 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17249 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17250 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17252 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17253 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17254 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17255 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17256 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17257 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17258 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17259 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17260 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17261 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17262 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17263 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17265 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17266 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17267 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17268 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17269 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17270 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17271 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17272 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17273 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17274 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17275 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17276 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17278 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17279 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17280 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17281 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17282 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17283 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17284 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17285 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17286 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17287 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17288 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17289 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17291 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17292 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17293 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17294 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17295 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17296 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17297 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17298 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17299 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17300 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17301 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17302 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17304 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17305 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17306 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17307 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17308 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17309 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17310 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17311 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17312 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17313 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17314 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17315 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17317 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17318 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17319 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17320 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17321 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17322 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17323 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17324 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17325 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17326 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17327 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17328 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17330 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17331 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17332 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17333 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17334 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17335 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17336 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17337 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17338 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17339 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17340 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17341 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17343 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17344 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17345 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17346 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17347 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17348 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17349 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17350 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17351 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17352 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17353 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17354 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
17356 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17357 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17358 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17359 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
17361 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
17362 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
17363 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
17364 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
17365 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
17366 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
17367 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
17368 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
17369 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
17370 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
17371 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
17372 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
 /* The implementation of the FIX instruction is broken on some
    assemblers, in that it accepts a precision specifier as well as a
    rounding specifier, despite the fact that this is meaningless.
    To be more compatible, we accept it as well, though of course it
    does not set any bits.  */
 cCE("fix",   e100110, 2, (RR, RF), rd_rm),
 cCL("fixp",  e100130, 2, (RR, RF), rd_rm),
 cCL("fixm",  e100150, 2, (RR, RF), rd_rm),
 cCL("fixz",  e100170, 2, (RR, RF), rd_rm),
 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
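 /* Illustrative consequence (not original text): because the precision
    letter is ignored, "fixsz r0, f1" and "fixz r0, f1" assemble to the same
    word -- note that fixsz, fixdz and fixez all share the .value e100170
    above.  */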
 /* Instructions that were new with the real FPA, call them V2.  */
#undef  ARM_VARIANT
#define ARM_VARIANT  & fpu_fpa_ext_v2

 cCE("lfm",   c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCE("sfm",   c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
17405 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
17407 /* Moves and type conversions. */
17408 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17409 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
17410 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
17411 cCE("fmstat", ef1fa10
, 0, (), noargs
),
17412 cCE("vmrs", ef10a10
, 2, (APSR_RR
, RVC
), vmrs
),
17413 cCE("vmsr", ee10a10
, 2, (RVC
, RR
), vmsr
),
17414 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17415 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17416 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17417 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17418 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17419 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17420 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
17421 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
17423 /* Memory operations. */
17424 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17425 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
17426 cCE("fldmias", c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17427 cCE("fldmfds", c900a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17428 cCE("fldmdbs", d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17429 cCE("fldmeas", d300a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17430 cCE("fldmiax", c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17431 cCE("fldmfdx", c900b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17432 cCE("fldmdbx", d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17433 cCE("fldmeax", d300b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17434 cCE("fstmias", c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17435 cCE("fstmeas", c800a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmia
),
17436 cCE("fstmdbs", d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17437 cCE("fstmfds", d200a00
, 2, (RRw
, VRSLST
), vfp_sp_ldstmdb
),
17438 cCE("fstmiax", c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17439 cCE("fstmeax", c800b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmia
),
17440 cCE("fstmdbx", d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17441 cCE("fstmfdx", d200b00
, 2, (RRw
, VRDLST
), vfp_xp_ldstmdb
),
17443 /* Monadic operations. */
17444 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17445 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17446 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17448 /* Dyadic operations. */
17449 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17450 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17451 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17452 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17453 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17454 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17455 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17456 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17457 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
17460 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17461 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
17462 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
17463 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
17465 /* Double precision load/store are still present on single precision
17466 implementations. */
17467 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17468 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
17469 cCE("fldmiad", c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17470 cCE("fldmfdd", c900b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17471 cCE("fldmdbd", d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17472 cCE("fldmead", d300b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17473 cCE("fstmiad", c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17474 cCE("fstmead", c800b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmia
),
17475 cCE("fstmdbd", d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
17476 cCE("fstmfdd", d200b00
, 2, (RRw
, VRDLST
), vfp_dp_ldstmdb
),
#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */

 /* Moves and type conversions.	 */
 cCE("fcpyd",	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
 cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
 cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
 cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
 cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
 cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
 cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
 cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
 cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
 cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
 cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
 cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
 cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
 /* Monadic operations.	 */
 cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
 cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
 cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),

 /* Dyadic operations.	*/
 cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),

 cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
 cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
 cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
 cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
#define ARM_VARIANT  & fpu_vfp_ext_v2

 cCE("fmsrr",	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
 cCE("fmrrs",	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
 cCE("fmdrr",	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
 cCE("fmrrd",	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
 /* Instructions which may belong to either the Neon or VFP instruction sets.
    Individual encoder functions perform additional architecture checks.  */
#define ARM_VARIANT    & fpu_vfp_ext_v1xd
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & fpu_vfp_ext_v1xd

 /* These mnemonics are unique to VFP.	*/
 NCE(vsqrt,     0,       2, (RVSD, RVSD),	vfp_nsyn_sqrt),
 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD),	vfp_nsyn_div),
 nCE(vnmul,     _vnmul,  3, (RVSD, RVSD, RVSD),	vfp_nsyn_nmul),
 nCE(vnmla,     _vnmla,  3, (RVSD, RVSD, RVSD),	vfp_nsyn_nmul),
 nCE(vnmls,     _vnmls,  3, (RVSD, RVSD, RVSD),	vfp_nsyn_nmul),
 nCE(vcmp,      _vcmp,   2, (RVSD, RVSD_I0),	vfp_nsyn_cmp),
 nCE(vcmpe,     _vcmpe,  2, (RVSD, RVSD_I0),	vfp_nsyn_cmp),
 NCE(vpush,     0,       1, (VRSDLST),		vfp_nsyn_push),
 NCE(vpop,      0,       1, (VRSDLST),		vfp_nsyn_pop),
 NCE(vcvtz,     0,       2, (RVSD, RVSD),	vfp_nsyn_cvtz),
 /* Mnemonics shared by Neon and VFP.  */
 nCEF(vmul,     _vmul,   3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
 nCEF(vmla,     _vmla,   3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
 nCEF(vmls,     _vmls,   3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),

 nCEF(vadd,     _vadd,   3, (RNSDQ, oRNSDQ, RNSDQ),	 neon_addsub_if_i),
 nCEF(vsub,     _vsub,   3, (RNSDQ, oRNSDQ, RNSDQ),	 neon_addsub_if_i),

 NCEF(vabs,     1b10300, 2, (RNSDQ, RNSDQ),		 neon_abs_neg),
 NCEF(vneg,     1b10380, 2, (RNSDQ, RNSDQ),		 neon_abs_neg),

 NCE(vldm,      c900b00, 2, (RRw, VRSDLST),		 neon_ldm_stm),
 NCE(vldmia,    c900b00, 2, (RRw, VRSDLST),		 neon_ldm_stm),
 NCE(vldmdb,    d100b00, 2, (RRw, VRSDLST),		 neon_ldm_stm),
 NCE(vstm,      c800b00, 2, (RRw, VRSDLST),		 neon_ldm_stm),
 NCE(vstmia,    c800b00, 2, (RRw, VRSDLST),		 neon_ldm_stm),
 NCE(vstmdb,    d000b00, 2, (RRw, VRSDLST),		 neon_ldm_stm),
 NCE(vldr,      d100b00, 2, (RVSD, ADDRGLDC),		 neon_ldr_str),
 NCE(vstr,      d000b00, 2, (RVSD, ADDRGLDC),		 neon_ldr_str),

 nCEF(vcvt,     _vcvt,   3, (RNSDQ, RNSDQ, oI32b),	 neon_cvt),
 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ),		 neon_cvtr),
 nCEF(vcvtb,	_vcvt,	 2, (RVS, RVS),			 neon_cvtb),
 nCEF(vcvtt,	_vcvt,	 2, (RVS, RVS),			 neon_cvtt),

 /* NOTE: All VMOV encoding is special-cased!  */
 NCE(vmov,      0,       1, (VMOV),			 neon_mov),
 NCE(vmovq,     0,       1, (VMOV),			 neon_mov),
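
 /* Illustrative note, assuming standard UAL syntax (not taken from the
    original comments): the single "vmov" mnemonic above covers immediate
    moves (vmov.i32 q0, #0), register moves (vmov d0, d1), core<->scalar
    transfers (vmov r0, s0) and core-pair<->double transfers
    (vmov r0, r1, d0); the neon_mov encoder chooses the encoding from the
    parsed operand shapes, which is why the table entry carries no opcode.  */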
17575 #undef THUMB_VARIANT
17576 #define THUMB_VARIANT & fpu_neon_ext_v1
17578 #define ARM_VARIANT & fpu_neon_ext_v1
17580 /* Data processing with three registers of the same length. */
17581 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
17582 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
17583 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
17584 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17585 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17586 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17587 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17588 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
17589 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
17590 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
17591 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17592 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17593 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
17594 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
17595 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17596 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17597 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
17598 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
17599 /* If not immediate, fall back to neon_dyadic_i64_su.
17600 shl_imm should accept I8 I16 I32 I64,
17601 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
17602 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
17603 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
17604 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
17605 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
17606 /* Logic ops, types optional & ignored. */
17607 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17608 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17609 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17610 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17611 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17612 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17613 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
17614 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
17615 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
17616 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
17617 /* Bitfield ops, untyped. */
17618 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17619 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17620 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17621 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17622 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
17623 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
17624 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
17625 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17626 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17627 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17628 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17629 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
17630 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
17631 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
17632 back to neon_dyadic_if_su. */
17633 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17634 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17635 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
17636 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
17637 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17638 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17639 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
17640 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
17641 /* Comparison. Type I8 I16 I32 F32. */
17642 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
17643 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
17644 /* As above, D registers only. */
17645 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17646 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
17647 /* Int and float variants, signedness unimportant. */
17648 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17649 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
17650 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
17651 /* Add/sub take types I8 I16 I32 I64 F32. */
17652 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17653 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
17654 /* vtst takes sizes 8, 16, 32. */
17655 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
17656 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
17657 /* VMUL takes I8 I16 I32 F32 P8. */
17658 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
17659 /* VQD{R}MULH takes S16 S32. */
17660 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17661 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17662 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
17663 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
17664 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17665 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17666 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
17667 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
17668 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17669 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17670 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
17671 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
17672 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17673 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17674 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
17675 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
17677 /* Two address, int/float. Types S8 S16 S32 F32. */
17678 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
17679 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
17681 /* Data processing with two registers and a shift amount. */
17682 /* Right shifts, and variants with rounding.
17683 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
17684 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
17685 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
17686 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
17687 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
17688 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
17689 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
17690 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
17691 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
17692 /* Shift and insert. Sizes accepted 8 16 32 64. */
17693 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
17694 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
17695 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
17696 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
17697 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
17698 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
17699 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
17700 /* Right shift immediate, saturating & narrowing, with rounding variants.
17701 Types accepted S16 S32 S64 U16 U32 U64. */
17702 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
17703 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
17704 /* As above, unsigned. Types accepted S16 S32 S64. */
17705 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
17706 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
17707 /* Right shift narrowing. Types accepted I16 I32 I64. */
17708 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
17709 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
17710 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
17711 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
17712 /* CVT with optional immediate for fixed-point variant. */
17713 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
17715 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
17716 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
17718 /* Data processing, three registers of different lengths. */
17719 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
17720 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
17721 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17722 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17723 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
17724 /* If not scalar, fall back to neon_dyadic_long.
17725 Vector types as above, scalar types S16 S32 U16 U32. */
17726 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
17727 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
17728 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
17729 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
17730 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
17731 /* Dyadic, narrowing insns. Types I16 I32 I64. */
17732 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17733 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17734 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17735 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
17736 /* Saturating doubling multiplies. Types S16 S32. */
17737 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17738 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17739 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
17740 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
17741 S16 S32 U16 U32. */
17742 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
17744 /* Extract. Size 8. */
17745 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
17746 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
17748 /* Two registers, miscellaneous. */
17749 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
17750 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
17751 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
17752 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
17753 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
17754 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
17755 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
17756 /* Vector replicate. Sizes 8 16 32. */
17757 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
17758 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
17759 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
17760 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
17761 /* VMOVN. Types I16 I32 I64. */
17762 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
17763 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
17764 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
17765 /* VQMOVUN. Types S16 S32 S64. */
17766 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
17767 /* VZIP / VUZP. Sizes 8 16 32. */
17768 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
17769 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
17770 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
17771 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
17772 /* VQABS / VQNEG. Types S8 S16 S32. */
17773 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
17774 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
17775 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
17776 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
17777 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
17778 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
17779 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
17780 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
17781 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
17782 /* Reciprocal estimates. Types U32 F32. */
17783 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
17784 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
17785 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
17786 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
17787 /* VCLS. Types S8 S16 S32. */
17788 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
17789 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
17790 /* VCLZ. Types I8 I16 I32. */
17791 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
17792 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
17793 /* VCNT. Size 8. */
17794 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
17795 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
17796 /* Two address, untyped. */
17797 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
17798 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
17799 /* VTRN. Sizes 8 16 32. */
17800 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
17801 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
17803 /* Table lookup. Size 8. */
17804 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
17805 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
#undef  THUMB_VARIANT
#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext

 /* Neon element/structure load/store.	 */
 nUF(vld1,      _vld1,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vst1,      _vst1,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vld2,      _vld2,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vst2,      _vst2,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vld3,      _vld3,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vst3,      _vst3,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vld4,      _vld4,   2, (NSTRLST, ADDR),  neon_ldx_stx),
 nUF(vst4,      _vst4,   2, (NSTRLST, ADDR),  neon_ldx_stx),
#undef  THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_v3xd
#define ARM_VARIANT   &fpu_vfp_ext_v3xd

 cCE("fconsts",	eb00a00, 2, (RVS, I255),      vfp_sp_const),
 cCE("fshtos",	eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
 cCE("fsltos",	eba0ac0, 2, (RVS, I32),	      vfp_sp_conv_32),
 cCE("fuhtos",	ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
 cCE("fultos",	ebb0ac0, 2, (RVS, I32),	      vfp_sp_conv_32),
 cCE("ftoshs",	ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
 cCE("ftosls",	ebe0ac0, 2, (RVS, I32),	      vfp_sp_conv_32),
 cCE("ftouhs",	ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
 cCE("ftouls",	ebf0ac0, 2, (RVS, I32),	      vfp_sp_conv_32),

#undef  THUMB_VARIANT
#define THUMB_VARIANT  & fpu_vfp_ext_v3
#define ARM_VARIANT    & fpu_vfp_ext_v3

 cCE("fconstd",	eb00b00, 2, (RVD, I255),      vfp_dp_const),
 cCE("fshtod",	eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
 cCE("fsltod",	eba0bc0, 2, (RVD, I32),	      vfp_dp_conv_32),
 cCE("fuhtod",	ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
 cCE("fultod",	ebb0bc0, 2, (RVD, I32),	      vfp_dp_conv_32),
 cCE("ftoshd",	ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
 cCE("ftosld",	ebe0bc0, 2, (RVD, I32),	      vfp_dp_conv_32),
 cCE("ftouhd",	ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
 cCE("ftould",	ebf0bc0, 2, (RVD, I32),	      vfp_dp_conv_32),
#define ARM_VARIANT   &fpu_vfp_ext_fma
#undef  THUMB_VARIANT
#define THUMB_VARIANT &fpu_vfp_ext_fma

 /* Mnemonics shared by Neon and VFP.  These are included in the
    VFP FMA variant; NEON and VFP FMA always includes the NEON
    FMA instructions.  */
 nCEF(vfma,     _vfma,   3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
 nCEF(vfms,     _vfms,   3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),

 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
    the v form should always be used.  */
 cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
 cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
 nCE(vfnma,     _vfnma,  3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
 nCE(vfnms,     _vfnms,  3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
17869 #undef THUMB_VARIANT
17871 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
17873 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
17874 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
17875 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
17876 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
17877 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
17878 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
17879 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
17880 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
17883 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
17885 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
17886 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
17887 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
17888 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
17889 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
17890 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
17891 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
17892 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
17893 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
17894 cCE("textrmub", e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
17895 cCE("textrmuh", e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
17896 cCE("textrmuw", e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
17897 cCE("textrmsb", e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
17898 cCE("textrmsh", e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
17899 cCE("textrmsw", e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
17900 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
17901 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
17902 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
17903 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
17904 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
17905 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
17906 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
17907 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
17908 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
17909 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
17910 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
17911 cCE("tmovmskb", e100030
, 2, (RR
, RIWR
), rd_rn
),
17912 cCE("tmovmskh", e500030
, 2, (RR
, RIWR
), rd_rn
),
17913 cCE("tmovmskw", e900030
, 2, (RR
, RIWR
), rd_rn
),
17914 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
17915 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
17916 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
17917 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
17918 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
17919 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
17920 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
17921 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
17922 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17923 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17924 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17925 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17926 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17927 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17928 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17929 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17930 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17931 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
17932 cCE("walignr0", e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17933 cCE("walignr1", e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17934 cCE("walignr2", ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17935 cCE("walignr3", eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17936 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17937 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17938 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17939 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17940 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17941 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17942 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17943 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17944 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17945 cCE("wcmpgtub", e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17946 cCE("wcmpgtuh", e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17947 cCE("wcmpgtuw", e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17948 cCE("wcmpgtsb", e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17949 cCE("wcmpgtsh", e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17950 cCE("wcmpgtsw", eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17951 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
17952 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
17953 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
17954 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
17955 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17956 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17957 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17958 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17959 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17960 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17961 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17962 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17963 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17964 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17965 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17966 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17967 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17968 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17969 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17970 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17971 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17972 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17973 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
17974 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17975 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17976 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17977 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17978 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17979 cCE("wpackhss", e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17980 cCE("wpackhus", e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17981 cCE("wpackwss", eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17982 cCE("wpackwus", e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17983 cCE("wpackdss", ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17984 cCE("wpackdus", ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17985 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
17986 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
17987 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
17988 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
17989 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
17990 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
17991 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17992 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17993 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17994 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
17995 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
17996 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
17997 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
17998 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
17999 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18000 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18001 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18002 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18003 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18004 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18005 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18006 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18007 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18008 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18009 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18010 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18011 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18012 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
18013 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
18014 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18015 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
18016 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
18017 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
18018 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18019 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18020 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18021 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18022 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18023 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18024 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18025 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18026 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18027 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18028 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18029 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18030 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18031 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18032 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
18033 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18034 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18035 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18036 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18037 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18038 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18039 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18040 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18041 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
18042 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18043 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18044 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18045 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18046 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
18049 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
18051 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
18052 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
18053 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
18054 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18055 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18056 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
18057 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18058 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18059 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18060 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18061 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18062 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18063 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18064 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18065 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18066 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18067 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18068 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18069 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18070 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18071 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
18072 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18073 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18074 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18075 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18076 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18077 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18078 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18079 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18080 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18081 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18082 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18083 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18084 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18085 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18086 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18087 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18088 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18089 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18090 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18091 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18092 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18093 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18094 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18095 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18096 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18097 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18098 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18099 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18100 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18101 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18102 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18103 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18104 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18105 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18106 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18107 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
18110 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
18112 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
18113 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
18114 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
18115 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
18116 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
18117 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
18118 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
18119 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
18120 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
18121 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
18122 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
18123 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
18124 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
18125 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
18126 cCE("cfmv64lr", e000510
, 2, (RMDX
, RR
), rn_rd
),
18127 cCE("cfmvr64l", e100510
, 2, (RR
, RMDX
), rd_rn
),
18128 cCE("cfmv64hr", e000530
, 2, (RMDX
, RR
), rn_rd
),
18129 cCE("cfmvr64h", e100530
, 2, (RR
, RMDX
), rd_rn
),
18130 cCE("cfmval32", e200440
, 2, (RMAX
, RMFX
), rd_rn
),
18131 cCE("cfmv32al", e100440
, 2, (RMFX
, RMAX
), rd_rn
),
18132 cCE("cfmvam32", e200460
, 2, (RMAX
, RMFX
), rd_rn
),
18133 cCE("cfmv32am", e100460
, 2, (RMFX
, RMAX
), rd_rn
),
18134 cCE("cfmvah32", e200480
, 2, (RMAX
, RMFX
), rd_rn
),
18135 cCE("cfmv32ah", e100480
, 2, (RMFX
, RMAX
), rd_rn
),
18136 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
18137 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
18138 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
18139 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
18140 cCE("cfmvsc32", e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
18141 cCE("cfmv32sc", e1004e0
, 2, (RMDX
, RMDS
), rd
),
18142 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
18143 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
18144 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
18145 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
18146 cCE("cfcvt32s", e000480
, 2, (RMF
, RMFX
), rd_rn
),
18147 cCE("cfcvt32d", e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
18148 cCE("cfcvt64s", e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
18149 cCE("cfcvt64d", e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
18150 cCE("cfcvts32", e100580
, 2, (RMFX
, RMF
), rd_rn
),
18151 cCE("cfcvtd32", e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
18152 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
18153 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
18154 cCE("cfrshl32", e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
18155 cCE("cfrshl64", e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
18156 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
18157 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
18158 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
18159 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
18160 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
18161 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
18162 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
18163 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
18164 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
18165 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
18166 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18167 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18168 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18169 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18170 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
18171 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
18172 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
18173 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
18174 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
18175 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
18176 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18177 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18178 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18179 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18180 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18181 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
18182 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18183 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
18184 cCE("cfmadd32", e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
18185 cCE("cfmsub32", e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
18186 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
18187 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
#undef THUMB_VARIANT

/* MD interface: bits in the object file.  */

/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and stores them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
   2 (short) and 4 (long)  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).	*/

void
md_number_to_chars (char * buf, valueT val, int n)
{
  if (target_big_endian)
    number_to_chars_bigendian (buf, val, n);
  else
    number_to_chars_littleendian (buf, val, n);
}
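
/* Illustrative example (not part of the original comments): on a
   little-endian target md_number_to_chars (buf, 0xe1a00000, 4) stores the
   bytes 00 00 a0 e1, while a big-endian target stores e1 a0 00 00 -- the
   same byte patterns used for the ARM NOP in arm_noop[] further down.  */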
static valueT
md_chars_to_number (char * buf, int n)
{
  valueT result = 0;
  unsigned char * where = (unsigned char *) buf;

  if (target_big_endian)
    while (n--)
      {
	result <<= 8;
	result |= (*where++ & 255);
      }
  else
    while (n--)
      {
	result <<= 8;
	result |= (where[n] & 255);
      }

  return result;
}
/* MD interface: Sections.  */

/* Estimate the size of a frag before relaxing.  Assume everything fits in
   2 bytes.  */

int
md_estimate_size_before_relax (fragS * fragp,
			       segT segtype ATTRIBUTE_UNUSED)
{
  fragp->fr_var = 2;
  return 2;
}
18274 /* Convert a machine dependent frag. */
18277 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
18279 unsigned long insn
;
18280 unsigned long old_op
;
18288 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
18290 old_op
= bfd_get_16(abfd
, buf
);
18291 if (fragp
->fr_symbol
)
18293 exp
.X_op
= O_symbol
;
18294 exp
.X_add_symbol
= fragp
->fr_symbol
;
18298 exp
.X_op
= O_constant
;
18300 exp
.X_add_number
= fragp
->fr_offset
;
18301 opcode
= fragp
->fr_subtype
;
18304 case T_MNEM_ldr_pc
:
18305 case T_MNEM_ldr_pc2
:
18306 case T_MNEM_ldr_sp
:
18307 case T_MNEM_str_sp
:
18314 if (fragp
->fr_var
== 4)
18316 insn
= THUMB_OP32 (opcode
);
18317 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
18319 insn
|= (old_op
& 0x700) << 4;
18323 insn
|= (old_op
& 7) << 12;
18324 insn
|= (old_op
& 0x38) << 13;
18326 insn
|= 0x00000c00;
18327 put_thumb32_insn (buf
, insn
);
18328 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
18332 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
18334 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
18337 if (fragp
->fr_var
== 4)
18339 insn
= THUMB_OP32 (opcode
);
18340 insn
|= (old_op
& 0xf0) << 4;
18341 put_thumb32_insn (buf
, insn
);
18342 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
18346 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18347 exp
.X_add_number
-= 4;
18355 if (fragp
->fr_var
== 4)
18357 int r0off
= (opcode
== T_MNEM_mov
18358 || opcode
== T_MNEM_movs
) ? 0 : 8;
18359 insn
= THUMB_OP32 (opcode
);
18360 insn
= (insn
& 0xe1ffffff) | 0x10000000;
18361 insn
|= (old_op
& 0x700) << r0off
;
18362 put_thumb32_insn (buf
, insn
);
18363 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
18367 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
18372 if (fragp
->fr_var
== 4)
18374 insn
= THUMB_OP32(opcode
);
18375 put_thumb32_insn (buf
, insn
);
18376 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
18379 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
18383 if (fragp
->fr_var
== 4)
18385 insn
= THUMB_OP32(opcode
);
18386 insn
|= (old_op
& 0xf00) << 14;
18387 put_thumb32_insn (buf
, insn
);
18388 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
18391 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
18394 case T_MNEM_add_sp
:
18395 case T_MNEM_add_pc
:
18396 case T_MNEM_inc_sp
:
18397 case T_MNEM_dec_sp
:
18398 if (fragp
->fr_var
== 4)
18400 /* ??? Choose between add and addw. */
18401 insn
= THUMB_OP32 (opcode
);
18402 insn
|= (old_op
& 0xf0) << 4;
18403 put_thumb32_insn (buf
, insn
);
18404 if (opcode
== T_MNEM_add_pc
)
18405 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
18407 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
18410 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18418 if (fragp
->fr_var
== 4)
18420 insn
= THUMB_OP32 (opcode
);
18421 insn
|= (old_op
& 0xf0) << 4;
18422 insn
|= (old_op
& 0xf) << 16;
18423 put_thumb32_insn (buf
, insn
);
18424 if (insn
& (1 << 20))
18425 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
18427 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
18430 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
18436 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
18437 (enum bfd_reloc_code_real
) reloc_type
);
18438 fixp
->fx_file
= fragp
->fr_file
;
18439 fixp
->fx_line
= fragp
->fr_line
;
18440 fragp
->fr_fix
+= fragp
->fr_var
;
/* Return the size of a relaxable immediate operand instruction.
   SHIFT and SIZE specify the form of the allowable immediate.	 */

static int
relax_immediate (fragS *fragp, int size, int shift)
{
  offsetT offset;
  offsetT mask;
  offsetT low;

  /* ??? Should be able to do better than this.	 */
  if (fragp->fr_symbol)
    return 4;

  low = (1 << shift) - 1;
  mask = (1 << (shift + size)) - (1 << shift);
  offset = fragp->fr_offset;
  /* Force misaligned offsets to 32-bit variant.  */
  if (offset & low)
    return 4;

  if (offset & ~mask)
    return 4;

  return 2;
}
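
/* Worked example (illustrative, based on the callers in arm_relax_frag
   below): with size = 5 and shift = 2, as used for the narrow word
   load/store form, low = 3 and mask = 0x7c, so any offset that is not a
   multiple of 4, or is larger than 124, forces the 4-byte encoding.  */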
18467 /* Get the address of a symbol during relaxation. */
18469 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
18475 sym
= fragp
->fr_symbol
;
18476 sym_frag
= symbol_get_frag (sym
);
18477 know (S_GET_SEGMENT (sym
) != absolute_section
18478 || sym_frag
== &zero_address_frag
);
18479 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
18481 /* If frag has yet to be reached on this pass, assume it will
18482 move by STRETCH just as we did. If this is not so, it will
18483 be because some frag between grows, and that will force
18487 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
18491 /* Adjust stretch for any alignment frag. Note that if have
18492 been expanding the earlier code, the symbol may be
18493 defined in what appears to be an earlier frag. FIXME:
18494 This doesn't handle the fr_subtype field, which specifies
18495 a maximum number of bytes to skip when doing an
18497 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
18499 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
18502 stretch
= - ((- stretch
)
18503 & ~ ((1 << (int) f
->fr_offset
) - 1));
18505 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load.  */

static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;

  val -= addr;
  if (val < 0 || val > 1020)
    return 4;

  return 2;
}
/* Return the size of a relaxable add/sub immediate instruction.  */

static int
relax_addsub (fragS *fragp, asection *sec)
{
  char *buf;
  int op;

  buf = fragp->fr_literal + fragp->fr_fix;
  op = bfd_get_16 (sec->owner, buf);
  if ((op & 0xf) == ((op >> 4) & 0xf))
    return relax_immediate (fragp, 8, 0);
  else
    return relax_immediate (fragp, 3, 0);
}
/* Return the size of a relaxable branch instruction.	BITS is the
   size of the offset field in the narrow instruction.	 */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
    return 4;
#endif

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;

  return 2;
}
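
/* Illustrative note (not from the original comments): arm_relax_frag below
   calls this with bits = 11 for the unconditional Thumb branch and bits = 8
   for the conditional form, so the narrow encodings are kept only while the
   target stays within roughly +/-2048 and +/-256 bytes respectively.	*/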
18592 /* Relax a machine dependent frag. This returns the amount by which
18593 the current size of the frag should change. */
18596 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
18601 oldsize
= fragp
->fr_var
;
18602 switch (fragp
->fr_subtype
)
18604 case T_MNEM_ldr_pc2
:
18605 newsize
= relax_adr (fragp
, sec
, stretch
);
18607 case T_MNEM_ldr_pc
:
18608 case T_MNEM_ldr_sp
:
18609 case T_MNEM_str_sp
:
18610 newsize
= relax_immediate (fragp
, 8, 2);
18614 newsize
= relax_immediate (fragp
, 5, 2);
18618 newsize
= relax_immediate (fragp
, 5, 1);
18622 newsize
= relax_immediate (fragp
, 5, 0);
18625 newsize
= relax_adr (fragp
, sec
, stretch
);
18631 newsize
= relax_immediate (fragp
, 8, 0);
18634 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
18637 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
18639 case T_MNEM_add_sp
:
18640 case T_MNEM_add_pc
:
18641 newsize
= relax_immediate (fragp
, 8, 2);
18643 case T_MNEM_inc_sp
:
18644 case T_MNEM_dec_sp
:
18645 newsize
= relax_immediate (fragp
, 7, 2);
18651 newsize
= relax_addsub (fragp
, sec
);
18657 fragp
->fr_var
= newsize
;
18658 /* Freeze wide instructions that are at or before the same location as
18659 in the previous pass. This avoids infinite loops.
18660 Don't freeze them unconditionally because targets may be artificially
18661 misaligned by the expansion of preceding frags. */
18662 if (stretch
<= 0 && newsize
> 2)
18664 md_convert_frag (sec
->owner
, sec
, fragp
);
18668 return newsize
- oldsize
;
/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED,
		  valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.	*/
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
      size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
    }
#endif

  return size;
}
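
/* For instance, with a section alignment of 2 (i.e. 4-byte alignment) a
   10-byte a.out section is rounded up to (10 + 3) & ~3 = 12 bytes.  */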
18695 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
18696 of an rs_align_code fragment. */
18699 arm_handle_align (fragS
* fragP
)
18701 static char const arm_noop
[2][2][4] =
18704 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
18705 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
18708 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
18709 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
18712 static char const thumb_noop
[2][2][2] =
18715 {0xc0, 0x46}, /* LE */
18716 {0x46, 0xc0}, /* BE */
18719 {0x00, 0xbf}, /* LE */
18720 {0xbf, 0x00} /* BE */
18723 static char const wide_thumb_noop
[2][4] =
18724 { /* Wide Thumb-2 */
18725 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
18726 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
18729 unsigned bytes
, fix
, noop_size
;
18732 const char *narrow_noop
= NULL
;
18737 if (fragP
->fr_type
!= rs_align_code
)
18740 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
18741 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
18744 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
18745 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
18747 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
18749 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
18751 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
18753 narrow_noop
= thumb_noop
[1][target_big_endian
];
18754 noop
= wide_thumb_noop
[target_big_endian
];
18757 noop
= thumb_noop
[0][target_big_endian
];
18765 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
) != 0]
18766 [target_big_endian
];
18773 fragP
->fr_var
= noop_size
;
18775 if (bytes
& (noop_size
- 1))
18777 fix
= bytes
& (noop_size
- 1);
18779 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
18781 memset (p
, 0, fix
);
18788 if (bytes
& noop_size
)
18790 /* Insert a narrow noop. */
18791 memcpy (p
, narrow_noop
, noop_size
);
18793 bytes
-= noop_size
;
18797 /* Use wide noops for the remainder */
18801 while (bytes
>= noop_size
)
18803 memcpy (p
, noop
, noop_size
);
18805 bytes
-= noop_size
;
18809 fragP
->fr_fix
+= fix
;
18812 /* Called from md_do_align. Used to create an alignment
18813 frag in a code section. */
18816 arm_frag_align_code (int n
, int max
)
18820 /* We assume that there will never be a requirement
18821 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
18822 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
18827 _("alignments greater than %d bytes not supported in .text sections."),
18828 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
18829 as_fatal ("%s", err_msg
);
18832 p
= frag_var (rs_align_code
,
18833 MAX_MEM_FOR_RS_ALIGN_CODE
,
18835 (relax_substateT
) max
,
18842 /* Perform target specific initialisation of a frag.
18843 Note - despite the name this initialisation is not done when the frag
18844 is created, but only when its type is assigned. A frag can be created
18845 and used a long time before its type is set, so beware of assuming that
18846 this initialisationis performed first. */
18850 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
18852 /* Record whether this frag is in an ARM or a THUMB area. */
18853 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18856 #else /* OBJ_ELF is defined. */
18858 arm_init_frag (fragS
* fragP
, int max_chars
)
18860 /* If the current ARM vs THUMB mode has not already
18861 been recorded into this frag then do so now. */
18862 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
18864 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18866 /* Record a mapping symbol for alignment frags. We will delete this
18867 later if the alignment ends up empty. */
18868 switch (fragP
->fr_type
)
18871 case rs_align_test
:
18873 mapping_state_2 (MAP_DATA
, max_chars
);
18875 case rs_align_code
:
18876 mapping_state_2 (thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
/* When we change sections we need to issue a new mapping symbol.  */

void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.	 */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}

int
arm_elf_section_type (const char * str, size_t len)
{
  if (len == 5 && strncmp (str, "exidx", 5) == 0)
    return SHT_ARM_EXIDX;

  return -1;
}
/* Code to deal with unwinding tables.  */

static void add_unwind_adjustsp (offsetT);

/* Generate any deferred unwind frame offset.  */

static void
flush_pending_unwind (void)
{
  offsetT offset;

  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
  if (offset != 0)
    add_unwind_adjustsp (offset);
}
/* Add an opcode to this list for this function.  Two-byte opcodes should
   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
   order.  */

static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.  */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  unwind.sp_restored = 0;

  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
        unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
                                                     unwind.opcode_alloc);
      else
        unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
    }

  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
/* Add unwind opcodes to adjust the stack pointer.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
         remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
        add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.  */
      n = 0;
      while (o)
        {
          bytes[n] = o & 0x7f;
          o >>= 7;
          if (o)
            bytes[n] |= 0x80;
          n++;
        }

      /* Add the insn.  */
      for (; n; n--)
        add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.  */
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      offset = -offset;
      while (offset > 0x100)
        {
          add_unwind_opcode (0x7f, 1);
          offset -= 0x100;
        }
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
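/* Worked example (illustrative, derived from the encoding above): for a
   deferred adjustment of 2048 bytes, offset > 0x200, so the long form is
   used.  o = (2048 - 0x204) >> 2 = 383, whose uleb128 encoding is the byte
   pair 0xff 0x02.  Because the opcode list is built in reverse, the bytes
   are added as 0x02, 0xff, 0xb2; an unwinder reads them forwards as
   "0xb2 0xff 0x02", i.e. vsp += 0x204 + (383 << 2) = 2048.  */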
/* Finish the list of unwind opcodes for this function.  */

static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
/* Start an exception table entry.  If idx is nonzero this is an index table
   entry.  */

static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  const char * group_name;
  size_t prefix_len;
  size_t text_len;
  char * sec_name;
  size_t sec_name_len;
  int type;
  int flags;
  int linkonce;

  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  if (strncmp (text_name, ".gnu.linkonce.t.",
               strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  prefix_len = strlen (prefix);
  text_len = strlen (text_name);
  sec_name_len = prefix_len + text_len;
  sec_name = (char *) xmalloc (sec_name_len + 1);
  memcpy (sec_name, prefix, prefix_len);
  memcpy (sec_name + prefix_len, text_name, text_len);
  sec_name[prefix_len + text_len] = '\0';

  flags = SHF_ALLOC;
  linkonce = 0;
  group_name = 0;

  /* Handle COMDAT group.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      group_name = elf_group_name (text_seg);
      if (group_name == NULL)
        {
          as_bad (_("Group section `%s' has no group signature"),
                  segment_name (text_seg));
          ignore_rest_of_line ();
          return;
        }
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
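/* Naming sketch (illustrative, assuming the usual definitions of
   ELF_STRING_ARM_unwind == ".ARM.exidx" and
   ELF_STRING_ARM_unwind_info == ".ARM.extab"): a function assembled into
   ".text.foo" gets its index entries in ".ARM.exidx.text.foo" and its table
   data in ".ARM.extab.text.foo"; plain ".text" maps to ".ARM.exidx" and
   ".ARM.extab" because text_name is reduced to the empty string above.  */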
/* Start an unwind table entry.  HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  */

static int
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.  */
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.  */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  /* Start a new section.  */
  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
        {
          if (have_data)
            as_bad (_("handlerdata in cantunwind frame"));
          return 1; /* EXIDX_CANTUNWIND.  */
        }

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
        {
          if (unwind.opcode_count > 3)
            unwind.personality_index = 1;
          else
            unwind.personality_index = 0;
        }

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
        {
          if (unwind.opcode_count > 3)
            as_bad (_("too many unwind opcodes for personality routine 0"));

          if (!have_data)
            {
              /* All the data is inline in the index table.  */
              data = 0x80;
              n = 3;
              while (unwind.opcode_count > 0)
                {
                  unwind.opcode_count--;
                  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
                  n--;
                }

              /* Pad with "finish" opcodes.  */
              while (n--)
                data = (data << 8) | 0xb0;

              return data;
            }
          size = 0;
        }
      else
        /* We get two opcodes "free" in the first word.  */
        size = unwind.opcode_count - 2;
    }
  else
    /* An extra byte is required for the opcode count.  */
    size = unwind.opcode_count + 1;

  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.  */
  ptr = frag_more ((size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
               BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.  */
      data = size - 1;
      n = 3;
      break;

      /* ABI defined personality routines.  */
    case 0:
      /* Three opcode bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
        {
          md_number_to_chars (ptr, data, 4);
          ptr += 4;
          n = 4;
          data = 0;
        }
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.  */
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
        data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.  */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
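/* Worked example (illustrative): with personality routine 0 and a single
   recorded opcode 0xb0, the inline path above builds data as
   (0x80 << 8) | 0xb0 = 0x80b0 and then pads with two more "finish" opcodes,
   returning 0x80b0b0b0 - the packed word used for an inline
   __aeabi_unwind_cpp_pr0 index table entry.  */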
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
#endif /* OBJ_ELF */
/* Convert REGNAME to a DWARF-2 register number.  */

int
tc_arm_regname_to_dw2regnum (char *regname)
{
  int reg = arm_reg_parse (&regname, REG_TYPE_RN);

  if (reg == FAIL)
    return -1;

  return reg;
}
#ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif /* TE_PE */
/* MD interface: Symbol and relocation handling.  */

/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
          || (arm_force_relocation (fixP)
#ifdef TE_WINCE
              && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
              )))
    base = 0;

  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
         bottom two bits of the PC are forced to zero for the
         calculation.  This happens *after* application of the
         pipeline offset.  However, Thumb adrl already adjusts for
         this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (fixP->fx_addsy
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (!S_IS_EXTERNAL (fixP->fx_addsy))
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
         zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (!S_IS_EXTERNAL (fixP->fx_addsy))
          && THUMB_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
         loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (!S_IS_EXTERNAL (fixP->fx_addsy))
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (!S_IS_EXTERNAL (fixP->fx_addsy))
          && THUMB_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
         discovered the value of a symbol, or the address of the frag involved
         we must account for the offset by +8, as the OS loader will never see the reloc.
         see fixup_segment() in write.c
         The S_IS_EXTERNAL test handles the case of global symbols.
         Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
          && fixP->fx_addsy != NULL
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
        return base + 8;
      return base;
#else
      return base + 8;
#endif

      /* ARM mode loads relative to PC are also offset by +8.  Unlike
         branches, the Windows CE loader *does* expect the relocation
         to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;

      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
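/* Example (illustrative): for an ARM-state "b label" assembled at address
   0x1000, the fixup sits at 0x1000 and the value returned above is 0x1008,
   matching the architectural PC (instruction address + 8); the equivalent
   Thumb branch uses 0x1004.  */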
/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
   Otherwise we have no need to default values of symbols.  */

symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      if (!GOT_symbol)
        {
          if (symbol_find (name))
            as_bad (_("GOT already in the symbol table"));

          GOT_symbol = symbol_new (name, undefined_section,
                                   (valueT) 0, & zero_address_frag);
        }

      return GOT_symbol;
    }
#endif

  return NULL;
}
/* Subroutine of md_apply_fix.  Check to see if an immediate can be
   computed as two separate immediate values, added together.  We
   already know that this value cannot be computed by just one ARM
   instruction.  */

static unsigned int
validate_immediate_twopart (unsigned int   val,
                            unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
        if (a & 0xff00)
          {
            if (a & ~ 0xffff)
              continue;
            * highpart = (a >> 8) | ((i + 24) << 7);
          }
        else if (a & 0xff0000)
          {
            if (a & 0xff000000)
              continue;
            * highpart = (a >> 16) | ((i + 16) << 7);
          }
        else
          {
            gas_assert (a & 0xff000000);
            * highpart = (a >> 24) | ((i + 8) << 7);
          }

        return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
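/* Example (illustrative): 0x10001 cannot be encoded as a single rotated
   8-bit immediate.  With i == 0 the low part returned is 0x01 and the high
   part is encoded as (0x1 | (16 << 7)), i.e. 1 rotated right by 16 = 0x10000,
   so an ADRL-style sequence can materialise the value as #1 plus #0x10000.  */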
static int
validate_offset_imm (unsigned int val, int hwse)
{
  if ((hwse && val > 255) || val > 4095)
    return FAIL;
  return val;
}
/* Subroutine of md_apply_fix.  Do those data_ops which can take a
   negative immediate constant by altering the instruction.  A bit of
   a hack really.
        MOV <-> MVN
        AND <-> BIC
        ADC <-> SBC
   by inverting the second operand, and
        ADD <-> SUB
        CMP <-> CMN
   by negating the second operand.  */

static int
negate_data_op (unsigned long * instruction,
                unsigned long   value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.  */
    case OPCODE_SUB:             /* ADD <-> SUB  */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:             /* CMP <-> CMN  */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:             /* MOV <-> MVN  */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:             /* AND <-> BIC  */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:             /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
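/* Example (illustrative): for "add r0, r1, #-4" the constant cannot be
   encoded directly, so the fixup rewrites the opcode field from ADD to SUB
   and uses encode_arm_immediate (4); similarly CMP/CMN swap with a negated
   operand, and MOV/MVN, AND/BIC, ADC/SBC swap with an inverted one.  */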
/* Like negate_data_op, but for Thumb-2.  */

static unsigned int
thumb32_negate_data_op (offsetT *instruction, unsigned int value)
{
  int op, new_inst;
  int rd;
  unsigned int negated, inverted;

  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      if (rd == 15)
        value = FAIL;
      else
        value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC.  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.  */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
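/* Example (illustrative): a Thumb-2 "mov r0, #0xfffffffb" has no valid
   modified-immediate encoding, but its inverse 4 does, so the ORR/ORN pair
   above rewrites it as "mvn r0, #4" (MOV and MVN being the Rn == PC forms
   of ORR and ORN).  */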
/* Read a 32-bit thumb instruction from buf.  */

static unsigned long
get_thumb32_insn (char * buf)
{
  unsigned long insn;

  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);

  return insn;
}
/* We usually want to set the low bit on the address of thumb function
   symbols.  In particular .word foo - . should have the low bit set.
   Generic code tries to fold the difference of two symbols to
   a constant.  Prevent this and force a relocation when the first symbol
   is a thumb function.  */

int
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      return 1;
    }

  /* Process as normal.  */
  return 0;
}
/* Encode Thumb2 unconditional branches and calls.  The encodings
   of the two are identical for the immediate values.  */

static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval = md_chars_to_number (buf, THUMB_SIZE);
  newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval |= (S << 10) | hi;
  newval2 &= ~T2I1I2MASK;
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
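/* Worked example (illustrative): for a forward byte offset of 0x1000,
   S = 0, I1 = I2 = 0, hi = 1 and lo = 0.  The XOR with T2I1I2MASK stores
   J1 = I1 ^ S ^ 1 = 1 and J2 = I2 ^ S ^ 1 = 1 in the second halfword, which
   is the inverted-and-XORed form the architecture defines for the wide
   B/BL offset fields.  */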
void
md_apply_fix (fixS *    fixP,
              valueT *  valP,
              segT      seg)
{
  offsetT        value = * valP;
  offsetT        newval;
  unsigned int   newimm;
  unsigned long  temp;
  int            sign;
  char *         buf = fixP->fx_where + fixP->fx_frag->fr_literal;

  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);

  /* Note whether this will delete the relocation.  */
  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
    fixP->fx_done = 1;

  /* On a 64-bit host, silently truncate 'value' to 32 bits for
     consistency with the behaviour on 32-bit hosts.  Remember value
     for emit_reloc.  */
  value &= 0xffffffff;
  value ^= 0x80000000;
  value -= 0x80000000;

  *valP = value;
  fixP->fx_addnumber = value;

  /* Same treatment for fixP->fx_offset.  */
  fixP->fx_offset &= 0xffffffff;
  fixP->fx_offset ^= 0x80000000;
  fixP->fx_offset -= 0x80000000;
  switch (fixP->fx_r_type)
    {
    case BFD_RELOC_NONE:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_ARM_IMMEDIATE:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
          && ! S_IS_DEFINED (fixP->fx_addsy))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
          break;
        }

      if (fixP->fx_addsy
          && S_GET_SEGMENT (fixP->fx_addsy) != seg)
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("symbol %s is in a different section"),
                        S_GET_NAME (fixP->fx_addsy));
          break;
        }

      newimm = encode_arm_immediate (value);
      temp = md_chars_to_number (buf, INSN_SIZE);

      /* If the instruction will fail, see if we can fix things up by
         changing the opcode.  */
      if (newimm == (unsigned int) FAIL
          && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
          break;
        }

      newimm |= (temp & 0xfffff000);
      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      {
        unsigned int highpart = 0;
        unsigned int newinsn  = 0xe1a00000; /* nop.  */

        if (fixP->fx_addsy
            && ! S_IS_DEFINED (fixP->fx_addsy))
          {
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("undefined symbol %s used as an immediate value"),
                          S_GET_NAME (fixP->fx_addsy));
            break;
          }

        if (fixP->fx_addsy
            && S_GET_SEGMENT (fixP->fx_addsy) != seg)
          {
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("symbol %s is in a different section"),
                          S_GET_NAME (fixP->fx_addsy));
            break;
          }

        newimm = encode_arm_immediate (value);
        temp = md_chars_to_number (buf, INSN_SIZE);

        /* If the instruction will fail, see if we can fix things up by
           changing the opcode.  */
        if (newimm == (unsigned int) FAIL
            && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
          {
            /* No ?  OK - try using two ADD instructions to generate
               the value.  */
            newimm = validate_immediate_twopart (value, & highpart);

            /* Yes - then make sure that the second instruction is
               also an add.  */
            if (newimm != (unsigned int) FAIL)
              newinsn = temp;
            /* Still No ?  Try using a negated value.  */
            else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
              temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
            /* Otherwise - give up.  */
            else
              {
                as_bad_where (fixP->fx_file, fixP->fx_line,
                              _("unable to compute ADRL instructions for PC offset of 0x%lx"),
                              (long) value);
                break;
              }

            /* Replace the first operand in the 2nd instruction (which
               is the PC) with the destination register.  We have
               already added in the PC in the first instruction and we
               do not want to do it again.  */
            newinsn &= ~ 0xf0000;
            newinsn |= ((newinsn & 0x0f000) << 4);
          }

        newimm |= (temp & 0xfffff000);
        md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);

        highpart |= (newinsn & 0xfffff000);
        md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
      }
      break;
    case BFD_RELOC_ARM_OFFSET_IMM:
      if (!fixP->fx_done && seg->use_rela_p)
        value = 0;

    case BFD_RELOC_ARM_LITERAL:
      sign = value >= 0;

      if (value < 0)
        value = - value;

      if (validate_offset_imm (value, 0) == FAIL)
        {
          if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid literal constant: pool needs to be closer"));
          else
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad immediate value for offset (%ld)"),
                          (long) value);
          break;
        }

      newval = md_chars_to_number (buf, INSN_SIZE);
      newval &= 0xff7ff000;
      newval |= value | (sign ? INDEX_UP : 0);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
      sign = value >= 0;

      if (value < 0)
        value = - value;

      if (validate_offset_imm (value, 1) == FAIL)
        {
          if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid literal constant: pool needs to be closer"));
          else
            as_bad (_("bad immediate value for 8-bit offset (%ld)"),
                    (long) value);
          break;
        }

      newval = md_chars_to_number (buf, INSN_SIZE);
      newval &= 0xff7ff0f0;
      newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_T32_OFFSET_U8:
      if (value < 0 || value > 1020 || value % 4 != 0)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("bad immediate value for offset (%ld)"), (long) value);
      value /= 4;

      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      newval |= value;
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
         load/store instruction with immediate offset:

         1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
                                                   *4, optional writeback(W)
                                                   (doubleword load/store)

         1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
         1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
         1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
         1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
         1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

         Uppercase letters indicate bits that are already encoded at
         this point.  Lowercase letters are our problem.  For the
         second block of instructions, the secondary opcode nybble
         (bits 8..11) is present, and bit 23 is zero, even if this is
         a PC-relative operation.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
        {
          /* Doubleword load/store: 8-bit offset, scaled by 4.  */
          if (value >= 0)
            newval |= (1 << 23);
          else
            value = -value;
          if (value % 4 != 0)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset not a multiple of 4"));
              break;
            }
          value /= 4;
          if (value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else if ((newval & 0x000f0000) == 0x000f0000)
        {
          /* PC-relative, 12-bit offset.  */
          if (value >= 0)
            newval |= (1 << 23);
          else
            value = -value;
          if (value > 0xfff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xfff;
        }
      else if ((newval & 0x00000100) == 0x00000100)
        {
          /* Writeback: 8-bit, +/- offset.  */
          if (value >= 0)
            newval |= (1 << 9);
          else
            value = -value;
          if (value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else if ((newval & 0x00000f00) == 0x00000e00)
        {
          /* T-instruction: positive 8-bit offset.  */
          if (value < 0 || value > 0xff)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~0xff;
        }
      else
        {
          /* Positive 12-bit or negative 8-bit offset.  */
          int limit;
          if (value >= 0)
            {
              newval |= (1 << 23);
              limit = 0xfff;
            }
          else
            {
              value = -value;
              limit = 0xff;
            }
          if (value > limit)
            {
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("offset out of range"));
              break;
            }
          newval &= ~limit;
        }

      newval |= value;
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_SHIFT_IMM:
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (((unsigned long) value) > 32
          || (value == 32
              && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("shift expression is too large"));
          break;
        }

      if (value == 0)
        /* Shifts of zero must be done as lsl.  */
        newval &= ~0x60;
      else if (value == 32)
        value = 0;
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* We claim that this fixup has been processed here,
         even if in fact we generate an error because we do
         not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
          && ! S_IS_DEFINED (fixP->fx_addsy))
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("undefined symbol %s used as an immediate value"),
                        S_GET_NAME (fixP->fx_addsy));
          break;
        }

      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
          || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
        {
          newimm = encode_thumb32_immediate (value);
          if (newimm == (unsigned int) FAIL)
            newimm = thumb32_negate_data_op (&newval, value);
        }
      if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
          && newimm == (unsigned int) FAIL)
        {
          /* Turn add/sub into addw/subw.  */
          if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
            newval = (newval & 0xfeffffff) | 0x02000000;

          /* 12 bit immediate for addw/subw.  */
          if (value < 0)
            {
              value = -value;
              newval ^= 0x00a00000;
            }
          if (value > 0xfff)
            newimm = (unsigned int) FAIL;
          else
            newimm = value;
        }

      if (newimm == (unsigned int)FAIL)
        {
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid constant (%lx) after fixup"),
                        (unsigned long) value);
          break;
        }

      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_SMC:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid smc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_SWI:
      if (fixP->tc_fix_data != 0)
        {
          if (((unsigned long) value) > 0xff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= value;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      else
        {
          if (((unsigned long) value) > 0x00ffffff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid swi expression"));
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= value;
          md_number_to_chars (buf, newval, INSN_SIZE);
        }
      break;

    case BFD_RELOC_ARM_MULTI:
      if (((unsigned long) value) > 0xffff)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PCREL_CALL:

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
          && fixP->fx_addsy
          && !S_IS_EXTERNAL (fixP->fx_addsy)
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && THUMB_IS_FUNC (fixP->fx_addsy))
        /* Flip the bl to blx.  This is a simple flip
           bit here because we generate PCREL_CALL for
           unconditional bls.  */
        {
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval = newval | 0x10000000;
          md_number_to_chars (buf, newval, INSN_SIZE);
          temp = 1;
          fixP->fx_done = 1;
        }
      else
        temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
          && fixP->fx_addsy
          && !S_IS_EXTERNAL (fixP->fx_addsy)
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && THUMB_IS_FUNC (fixP->fx_addsy))
        {
          /* This would map to a bl<cond>, b<cond>,
             b<always> to a Thumb function.  We
             need to force a relocation for this particular
             case.  */
          newval = md_chars_to_number (buf, INSN_SIZE);
          fixP->fx_done = 0;
        }

    case BFD_RELOC_ARM_PLT32:
#endif
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:

      temp = 1;
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
          && fixP->fx_addsy
          && !S_IS_EXTERNAL (fixP->fx_addsy)
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && ARM_IS_FUNC (fixP->fx_addsy))
        {
          /* Flip the blx to a bl and warn.  */
          const char *name = S_GET_NAME (fixP->fx_addsy);
          newval = 0xeb000000;
          as_warn_where (fixP->fx_file, fixP->fx_line,
                         _("blx to '%s' an ARM ISA state function changed to bl"),
                         name);
          md_number_to_chars (buf, newval, INSN_SIZE);
          temp = 3;
          fixP->fx_done = 1;
        }

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
        fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
#endif

    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
         instruction, in a 24 bit, signed field.  Bits 26 through 32 either
         all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
         also be clear.  */
      if (value & temp)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("misaligned branch destination"));
      if ((value & (offsetT)0xfe000000) != (offsetT)0
          && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, INSN_SIZE);
          newval |= (value >> 2) & 0x00ffffff;
          /* Set the H bit on BLX instructions.  */
          if (temp == 1)
            {
              if (value & 2)
                newval |= 0x01000000;
              else
                newval &= ~0x01000000;
            }
          md_number_to_chars (buf, newval, INSN_SIZE);
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */

      /* Attempts to use CBZ to branch to the next instruction
         (which, strictly speaking, are prohibited) will be turned into
         no-ops.

         FIXME: It may be better to remove the instruction completely and
         perform relaxation.  */
      if (value == -2)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval = 0xbf00; /* NOP encoding T1 */
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      else
        {
          if (value & ~0x7e)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("branch out of range"));

          if (fixP->fx_done || !seg->use_rela_p)
            {
              newval = md_chars_to_number (buf, THUMB_SIZE);
              newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
              md_number_to_chars (buf, newval, THUMB_SIZE);
            }
        }
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.  */
      if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0x1ff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, THUMB_SIZE);
          newval |= (value & 0xfff) >> 1;
          md_number_to_chars (buf, newval, THUMB_SIZE);
        }
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if (fixP->fx_addsy
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && !S_IS_EXTERNAL (fixP->fx_addsy)
          && S_IS_DEFINED (fixP->fx_addsy)
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        {
          /* Force a relocation for a branch 20 bits wide.  */
          fixP->fx_done = 0;
        }
      if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        {
          offsetT newval2;
          addressT S, J1, J2, lo, hi;

          S  = (value & 0x00100000) >> 20;
          J2 = (value & 0x00080000) >> 19;
          J1 = (value & 0x00040000) >> 18;
          hi = (value & 0x0003f000) >> 12;
          lo = (value & 0x00000ffe) >> 1;

          newval  = md_chars_to_number (buf, THUMB_SIZE);
          newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval  |= (S << 10) | hi;
          newval2 |= (J1 << 13) | (J2 << 11) | lo;
          md_number_to_chars (buf, newval, THUMB_SIZE);
          md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
        }
      break;
    case BFD_RELOC_THUMB_PCREL_BLX:

      /* If there is a blx from a thumb state function to
         another thumb function flip this to a bl and warn
         about it.  */
      if (fixP->fx_addsy
          && S_IS_DEFINED (fixP->fx_addsy)
          && !S_IS_EXTERNAL (fixP->fx_addsy)
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && THUMB_IS_FUNC (fixP->fx_addsy))
        {
          const char *name = S_GET_NAME (fixP->fx_addsy);
          as_warn_where (fixP->fx_file, fixP->fx_line,
                         _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
                         name);
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval = newval | 0x1000;
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
          fixP->fx_done = 1;
        }

      goto thumb_bl_common;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:

      /* A bl from Thumb state ISA to an internal ARM state function
         is converted to a blx.  */
      if (fixP->fx_addsy
          && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
          && !S_IS_EXTERNAL (fixP->fx_addsy)
          && S_IS_DEFINED (fixP->fx_addsy)
          && ARM_IS_FUNC (fixP->fx_addsy)
          && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
        {
          newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
          newval = newval & ~0x1000;
          md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
          fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
          fixP->fx_done = 1;
        }

    thumb_bl_common:

#ifdef OBJ_ELF
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4 &&
          fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
        fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
#endif

      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
        /* For a BLX instruction, make sure that the relocation is rounded up
           to a word boundary.  This follows the semantics of the instruction
           which specifies that bit 1 of the target address will come from bit
           1 of the base address.  */
        value = (value + 1) & ~ 1;

      if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
        {
          if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("branch out of range"));
          else if ((value & ~0x1ffffff)
                   && ((value & ~0x1ffffff) != ~0x1ffffff))
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("Thumb2 branch out of range"));
        }

      if (fixP->fx_done || !seg->use_rela_p)
        encode_thumb2_b_bl_offset (buf, value);
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
        encode_thumb2_b_bl_offset (buf, value);
      break;
    case BFD_RELOC_8:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 1);
      break;

    case BFD_RELOC_16:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 2);
      break;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      /* fall through */

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, 0, 4);
      break;

    case BFD_RELOC_ARM_GOT_PREL:
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_ARM_TARGET2:
      /* TARGET2 is not partial-inplace, so we need to write the
         addend here for REL targets, because it won't be written out
         during reloc processing later.  */
      if (fixP->fx_done || !seg->use_rela_p)
        md_number_to_chars (buf, fixP->fx_offset, 4);
      break;
#endif
    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
        /* For WinCE we only do this for pcrel fixups.  */
        if (fixP->fx_done || fixP->fx_pcrel)
#endif
          md_number_to_chars (buf, value, 4);
      break;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PREL31:
      if (fixP->fx_done || !seg->use_rela_p)
        {
          newval = md_chars_to_number (buf, 4) & 0x80000000;
          if ((value ^ (value >> 1)) & 0x40000000)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("rel31 relocation overflow"));
          newval |= value & 0x7fffffff;
          md_number_to_chars (buf, newval, 4);
        }
      break;
#endif
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      if (value < -1023 || value > 1023 || (value & 3))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
    cp_off_common:
      sign = value >= 0;
      if (value < 0)
        value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        newval = md_chars_to_number (buf, INSN_SIZE);
      else
        newval = get_thumb32_insn (buf);
      newval &= 0xff7fff00;
      newval |= (value >> 2) | (sign ? INDEX_UP : 0);
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
          || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
        md_number_to_chars (buf, newval, INSN_SIZE);
      else
        put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      if (value < -255 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
    case BFD_RELOC_ARM_THUMB_OFFSET:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted depends
         on the type of instruction, we can establish this from the
         top 4 bits.  */
      switch (newval >> 12)
        {
        case 4: /* PC load.  */
          /* Thumb PC loads are somewhat odd, bit 1 of the PC is
             forced to zero for these loads; md_pcrel_from has already
             compensated for this.  */
          if (value & 3)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, target not word aligned (0x%08lX)"),
                          (((unsigned long) fixP->fx_frag->fr_address
                            + (unsigned long) fixP->fx_where) & ~3)
                          + (unsigned long) value);

          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);

          newval |= value >> 2;
          break;

        case 9: /* SP load/store.  */
          if (value & ~0x3fc)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value >> 2;
          break;

        case 6: /* Word load/store.  */
          if (value & ~0x7c)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 4; /* 6 - 2.  */
          break;

        case 7: /* Byte load/store.  */
          if (value & ~0x1f)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 6;
          break;

        case 8: /* Halfword load/store.  */
          if (value & ~0x3e)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("invalid offset, value too big (0x%08lX)"),
                          (long) value);
          newval |= value << 5; /* 6 - 1.  */
          break;

        default:
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        "Unable to process relocation for thumb opcode: %lx",
                        (unsigned long) newval);
          break;
        }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_THUMB_ADD:
      /* This is a complicated relocation, since we use it for all of
         the following immediate relocations:

            3bit ADD/SUB
            8bit ADD/SUB
            9bit ADD/SUB SP word-aligned
           10bit ADD PC/SP word-aligned

         The type of instruction being processed is encoded in the
         instruction field:

           0x8000  SUB
           0x00F0  Rd
           0x000F  Rs
      */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      {
        int rd = (newval >> 4) & 0xf;
        int rs = newval & 0xf;
        int subtract = !!(newval & 0x8000);

        /* Check for HI regs, only very restricted cases allowed:
           Adjusting SP, and using PC or SP to get an address.  */
        if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
            || (rs > 7 && rs != REG_SP && rs != REG_PC))
          as_bad_where (fixP->fx_file, fixP->fx_line,
                        _("invalid Hi register with immediate"));

        /* If value is negative, choose the opposite instruction.  */
        if (value < 0)
          {
            value = -value;
            subtract = !subtract;
            if (value < 0)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
          }

        if (rd == REG_SP)
          {
            if (value & ~0x1fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for stack address calculation"));
            newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
            newval |= value >> 2;
          }
        else if (rs == REG_PC || rs == REG_SP)
          {
            if (subtract || value & ~0x3fc)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("invalid immediate for address calculation (value = 0x%08lX)"),
                            (unsigned long) value);
            newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
            newval |= rd << 8;
            newval |= value >> 2;
          }
        else if (rs == rd)
          {
            if (value & ~0xff)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
            newval |= (rd << 8) | value;
          }
        else
          {
            if (value & ~0x7)
              as_bad_where (fixP->fx_file, fixP->fx_line,
                            _("immediate value out of range"));
            newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
            newval |= rd | (rs << 3) | (value << 6);
          }
      }
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_ARM_THUMB_IMM:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      if (value < 0 || value > 255)
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid immediate: %ld is out of range"),
                      (long) value);
      newval |= value;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;

    case BFD_RELOC_ARM_THUMB_SHIFT:
      /* 5bit shift value (0..32).  LSL cannot take 32.  */
      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
      temp = newval & 0xf800;
      if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
        as_bad_where (fixP->fx_file, fixP->fx_line,
                      _("invalid shift value: %ld"), (long) value);
      /* Shifts of zero must be encoded as LSL.  */
      if (value == 0)
        newval = (newval & 0x003f) | T_OPCODE_LSL_I;
      /* Shifts of 32 are encoded as zero.  */
      else if (value == 32)
        value = 0;
      newval |= value << 6;
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_VTABLE_ENTRY:
      fixP->fx_done = 0;
      return;

    case BFD_RELOC_ARM_MOVW:
    case BFD_RELOC_ARM_MOVT:
    case BFD_RELOC_ARM_THUMB_MOVW:
    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixP->fx_done || !seg->use_rela_p)
        {
          /* REL format relocations are limited to a 16-bit addend.  */
          if (!fixP->fx_done)
            {
              if (value < -0x8000 || value > 0x7fff)
                as_bad_where (fixP->fx_file, fixP->fx_line,
                              _("offset out of range"));
            }
          else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
                   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
            value >>= 16;

          if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
              || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
            {
              newval = get_thumb32_insn (buf);
              newval &= 0xfbf08f00;
              newval |= (value & 0xf000) << 4;
              newval |= (value & 0x0800) << 15;
              newval |= (value & 0x0700) << 4;
              newval |= (value & 0x00ff);
              put_thumb32_insn (buf, newval);
            }
          else
            {
              newval = md_chars_to_number (buf, 4);
              newval &= 0xfff0f000;
              newval |= value & 0x0fff;
              newval |= (value & 0xf000) << 4;
              md_number_to_chars (buf, newval, 4);
            }
        }
      return;
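      /* Worked example (illustrative): resolving an ARM-state
         "movw r0, #0x1234" here places imm12 = 0x234 in bits 0-11 and
         imm4 = 0x1 in bits 16-19 via the two OR operations above; a MOVT
         fixup first shifts the addend right by 16 so the same packing
         stores the high halfword.  */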
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
      gas_assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma encoded_addend;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend can be
             expressed as an 8-bit constant plus a rotation.  */
          encoded_addend = encode_arm_immediate (addend_abs);
          if (encoded_addend == (unsigned int) FAIL)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("the offset 0x%08lX is not representable"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is positive, use an ADD instruction.
             Otherwise use a SUB.  Take care not to destroy the S bit.  */
          insn &= 0xff1fffff;
          if (value < 0)
            insn |= 1 << 22;
          else
            insn |= 1 << 23;

          /* Place the encoded addend into the first 12 bits of the
             instruction.  */
          insn &= 0xfffff000;
          insn |= encoded_addend;

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
      gas_assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend can be
             encoded in 12 bits.  */
          if (addend_abs >= 0x1000)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is negative, clear bit 23 of the instruction.
             Otherwise set it.  */
          if (value < 0)
            insn &= ~(1 << 23);
          else
            insn |= 1 << 23;

          /* Place the absolute value of the addend into the first 12 bits
             of the instruction.  */
          insn &= 0xfffff000;
          insn |= addend_abs;

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
      gas_assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend can be
             encoded in 8 bits.  */
          if (addend_abs >= 0x100)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is negative, clear bit 23 of the instruction.
             Otherwise set it.  */
          if (value < 0)
            insn &= ~(1 << 23);
          else
            insn |= 1 << 23;

          /* Place the first four bits of the absolute value of the addend
             into the first 4 bits of the instruction, and the remaining
             four into bits 8 .. 11.  */
          insn &= 0xfffff0f0;
          insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
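      /* Example (illustrative): for an addend of 0xab the split above stores
         0xb in bits 0-3 and 0xa in bits 8-11, matching the imm4L/imm4H
         halves of the halfword/signed-byte load addressing encoding.  */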
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
      gas_assert (!fixP->fx_done);
      if (!seg->use_rela_p)
        {
          bfd_vma insn;
          bfd_vma addend_abs = abs (value);

          /* Check that the absolute value of the addend is a multiple of
             four and, when divided by four, fits in 8 bits.  */
          if (addend_abs & 0x3)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (must be word-aligned)"),
                          (unsigned long) addend_abs);

          if ((addend_abs >> 2) > 0xff)
            as_bad_where (fixP->fx_file, fixP->fx_line,
                          _("bad offset 0x%08lX (must be an 8-bit number of words)"),
                          (unsigned long) addend_abs);

          /* Extract the instruction.  */
          insn = md_chars_to_number (buf, INSN_SIZE);

          /* If the addend is negative, clear bit 23 of the instruction.
             Otherwise set it.  */
          if (value < 0)
            insn &= ~(1 << 23);
          else
            insn |= 1 << 23;

          /* Place the addend (divided by four) into the first eight
             bits of the instruction.  */
          insn &= 0xfffffff0;
          insn |= addend_abs >> 2;

          /* Update the instruction.  */
          md_number_to_chars (buf, insn, INSN_SIZE);
        }
      break;
    case BFD_RELOC_ARM_V4BX:
      /* This will need to go in the object file.  */
      fixP->fx_done = 0;
      break;

    case BFD_RELOC_UNUSED:
    default:
      as_bad_where (fixP->fx_file, fixP->fx_line,
                    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
    }
}
/* Translate internal representation of relocation info to BFD target
   format.  */

arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = (arelent *) xmalloc (sizeof (arelent));

  reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
        fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
        fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_8_PCREL;
          break;
        }

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_16_PCREL;
          break;
        }

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_32_PCREL;
          break;
        }

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_MOVW_PCREL;
          break;
        }

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_MOVT_PCREL;
          break;
        }

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
          break;
        }

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
        {
          code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
          break;
        }

    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
    case BFD_RELOC_32_SECREL:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
        code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
        code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
         been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("literal referenced across section boundary"));
      return NULL;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
      /* BFD will include the symbol's address in the addend.
         But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
        reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
        {
          code = fixp->fx_r_type;
          break;
        }

      if (fixp->fx_addsy != NULL
          && !S_IS_DEFINED (fixp->fx_addsy)
          && S_IS_LOCAL (fixp->fx_addsy))
        {
          as_bad_where (fixp->fx_file, fixp->fx_line,
                        _("undefined local label `%s'"),
                        S_GET_NAME (fixp->fx_addsy));
          return NULL;
        }

      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
        char * type;

        switch (fixp->fx_r_type)
          {
          case BFD_RELOC_NONE:             type = "NONE";         break;
          case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
          case BFD_RELOC_ARM_SHIFT_IMM:    type = "SHIFT_IMM";    break;
          case BFD_RELOC_ARM_SMC:          type = "SMC";          break;
          case BFD_RELOC_ARM_SWI:          type = "SWI";          break;
          case BFD_RELOC_ARM_MULTI:        type = "MULTI";        break;
          case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";   break;
          case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
          case BFD_RELOC_ARM_THUMB_ADD:    type = "THUMB_ADD";    break;
          case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
          case BFD_RELOC_ARM_THUMB_IMM:    type = "THUMB_IMM";    break;
          case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
          default:                         type = _("<unknown>"); break;
          }
        as_bad_where (fixp->fx_file, fixp->fx_line,
                      _("cannot represent %s relocation in this object file format"),
                      type);
        return NULL;
      }
    }

  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
                    _("cannot represent %s relocation in this object file format"),
                    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
/* This fix_new is called by cons via TC_CONS_FIX_NEW.  */

void
cons_fix_new_arm (fragS * frag,
                  int where,
                  int size,
                  expressionS * exp)
{
  bfd_reloc_code_real_type type;
  int pcrel = 0;

  /* Pick a reloc.
     FIXME: @@ Should look at CPU word size.  */
  switch (size)
    {
    case 1:
      type = BFD_RELOC_8;
      break;
    case 2:
      type = BFD_RELOC_16;
      break;
    case 4:
    default:
      type = BFD_RELOC_32;
      break;
    case 8:
      type = BFD_RELOC_64;
      break;
    }

#ifdef TE_PE
  if (exp->X_op == O_secrel)
    {
      exp->X_op = O_symbol;
      type = BFD_RELOC_32_SECREL;
    }
#endif

  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
}
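
/* Illustrative example (not part of the original source): data directives
   hand each value to the routine above via TC_CONS_FIX_NEW, so

       .byte  0x12          @ size 1 -> BFD_RELOC_8
       .short label         @ size 2 -> BFD_RELOC_16
       .word  ext_sym       @ size 4 -> BFD_RELOC_32

   each create a fix with the corresponding relocation type.  */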
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a Thumb function or vice-versa, force the relocation.  These relocations
     are cleared off for cores that have blx and for which simple
     transformations are possible.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (THUMB_IS_FUNC (fixp->fx_addsy))
        return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      if (ARM_IS_FUNC (fixp->fx_addsy))
        return 1;
      break;

    default:
      break;
    }

  /* Resolve these relocations even if the symbol is extern or weak.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
    return 0;

  /* Always leave these relocations for the linker.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   There is one other problem that ought to be addressed here, but
   which currently is not: Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
const char *
elf32_arm_target_format (void)
{
#ifdef TE_SYMBIAN
  return (target_big_endian
          ? "elf32-bigarm-symbian"
          : "elf32-littlearm-symbian");
#elif defined (TE_VXWORKS)
  return (target_big_endian
          ? "elf32-bigarm-vxworks"
          : "elf32-littlearm-vxworks");
#else
  if (target_big_endian)
    return "elf32-bigarm";
  else
    return "elf32-littlearm";
#endif
}
void
armelf_frob_symbol (symbolS * symp,
                    int * puntp)
{
  elf_frob_symbol (symp, puntp);
}
/* MD interface: Finalization.  */

void
arm_cleanup (void)
{
  literal_pool * pool;

  /* Ensure that all the IT blocks are properly closed.  */
  check_it_blocks_finished ();

  for (pool = list_of_pools; pool; pool = pool->next)
    {
      /* Put it at the end of the relevant section.  */
      subseg_set (pool->section, pool->sub_section);
      arm_elf_change_section ();
      s_ltorg (0);
    }
}
/* Remove any excess mapping symbols generated for alignment frags in
   SEC.  We may have created a mapping symbol before a zero byte
   alignment; remove it if there's a mapping symbol after the
   alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
                       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
         this point.  But if this was variable-sized to start with,
         there will be a fixed-size frag after it.  So don't handle
         next == NULL.  */
      if (sym == NULL || next == NULL)
        continue;

      if (S_GET_VALUE (sym) < next->fr_address)
        /* Not at the end of this frag.  */
        continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      do
        {
          if (next->tc_frag_data.first_map != NULL)
            {
              /* Next frag starts with a mapping symbol.  Discard this
                 one.  */
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
              break;
            }

          if (next->fr_next == NULL)
            {
              /* This mapping symbol is at the end of the section.  Discard
                 it.  */
              know (next->fr_fix == 0 && next->fr_var == 0);
              symbol_remove (sym, &symbol_rootP, &symbol_lastP);
              break;
            }

          /* As long as we have empty frags without any mapping symbols,
             keep looking.  */
          /* If the next frag is non-empty and does not start with a
             mapping symbol, then this mapping symbol is required.  */
          if (next->fr_address != next->fr_next->fr_address)
            break;

          next = next->fr_next;
        }
      while (next != NULL);
    }
}
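
/* Illustrative note (an assumption spelled out here, not text from the
   original source): the mapping symbols being trimmed above are the ARM
   ELF "$a", "$t" and "$d" symbols that mark ARM code, Thumb code and data
   within a section, e.g.

       .code 32      @ start of ARM code   -> "$a" mapping symbol
       .code 16      @ start of Thumb code -> "$t" mapping symbol
       .word 0x1234  @ literal data        -> "$d" mapping symbol

   Alignment frags can generate redundant ones, which this pass removes.  */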
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
        {
          if (THUMB_IS_FUNC (sym))
            {
              /* Mark the symbol as a Thumb function.  */
              if (S_GET_STORAGE_CLASS (sym) == C_STAT
                  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
                S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
              else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
                S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
              else
                as_bad (_("%s: unexpected function type: %d"),
                        S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
            }
          else switch (S_GET_STORAGE_CLASS (sym))
            {
            case C_EXT:
              S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
              break;
            case C_STAT:
              S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
              break;
            case C_LABEL:
              S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
              break;
            default:
              break;
            }
        }

      if (ARM_IS_INTERWORK (sym))
        coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
        {
          elf_symbol_type * elf_sym;

          elf_sym = elf_symbol (symbol_get_bfdsym (sym));
          bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

          if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
                                                BFD_ARM_SPECIAL_SYM_TYPE_ANY))
            {
              /* If it's a .thumb_func, declare it as so,
                 otherwise tag label as .code 16.  */
              if (THUMB_IS_FUNC (sym))
                elf_sym->internal_elf_sym.st_info =
                  ELF_ST_INFO (bind, STT_ARM_TFUNC);
              else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
                elf_sym->internal_elf_sym.st_info =
                  ELF_ST_INFO (bind, STT_ARM_16BIT);
            }
        }
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
#endif
}
/* MD interface: Initialization.  */

static void
set_constant_flonums (void)
{
  int i;

  for (i = 0; i < NUM_FLOAT_VALS; i++)
    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
      abort ();
}

/* Auto-select Thumb mode if it's the only available instruction set for the
   given architecture.  */

static void
autoselect_thumb_from_cpu_variant (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  if ((arm_ops_hsh = hash_new ()) == NULL
      || (arm_cond_hsh = hash_new ()) == NULL
      || (arm_shift_hsh = hash_new ()) == NULL
      || (arm_psr_hsh = hash_new ()) == NULL
      || (arm_v7m_psr_hsh = hash_new ()) == NULL
      || (arm_reg_hsh = hash_new ()) == NULL
      || (arm_reloc_hsh = hash_new ()) == NULL
      || (arm_barrier_opt_hsh = hash_new ()) == NULL)
    as_fatal (_("virtual memory exhausted"));

  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
                 (void *) (v7m_psrs + i));
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
                 (void *) (barrier_opt_names + i));
#ifdef OBJ_ELF
  for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
    hash_insert (arm_reloc_hsh, reloc_names[i].name, (void *) (reloc_names + i));
#endif

  set_constant_flonums ();
  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options together with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
        as_bad (_("use of old and new-style options to set CPU type"));

      mcpu_cpu_opt = legacy_cpu;
    }
  else if (!mcpu_cpu_opt)
    mcpu_cpu_opt = march_cpu_opt;

  if (legacy_fpu)
    {
      if (mfpu_opt)
        as_bad (_("use of old and new-style options to set FPU type"));

      mfpu_opt = legacy_fpu;
    }
  else if (!mfpu_opt)
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
      || defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
         from the processor.  */
      if (mcpu_fpu_opt)
        mfpu_opt = mcpu_fpu_opt;
      else
        mfpu_opt = march_fpu_opt;
#else
      mfpu_opt = &fpu_default;
#endif
    }

  if (!mfpu_opt)
    {
      if (mcpu_cpu_opt != NULL)
        mfpu_opt = &fpu_default;
      else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
        mfpu_opt = &fpu_arch_vfp_v2;
      else
        mfpu_opt = &fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (!mcpu_cpu_opt)
    {
      mcpu_cpu_opt = &cpu_default;
      selected_cpu = cpu_default;
    }
#else
  if (mcpu_cpu_opt)
    selected_cpu = *mcpu_cpu_opt;
  else
    mcpu_cpu_opt = &arm_arch_any;
#endif

  ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
        /* Set the flags in the private structure.  */
        if (uses_apcs_26)      flags |= F_APCS26;
        if (support_interwork) flags |= F_INTERWORK;
        if (uses_apcs_float)   flags |= F_APCS_FLOAT;
        if (pic_code)          flags |= F_PIC;
        if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
          flags |= F_SOFT_FLOAT;

        switch (mfloat_abi_opt)
          {
          case ARM_FLOAT_ABI_SOFT:
          case ARM_FLOAT_ABI_SOFTFP:
            flags |= F_SOFT_FLOAT;
            break;

          case ARM_FLOAT_ABI_HARD:
            if (flags & F_SOFT_FLOAT)
              as_bad (_("hard-float conflicts with specified fpu"));
            break;
          }

        /* Using pure-endian doubles (even if soft-float).  */
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
          flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
        if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
          flags |= EF_ARM_MAVERICK_FLOAT;
        break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
        /* No additional flags to set.  */
        break;

      default:
        abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.  */
    if (atpcs)
      {
        asection * sec;

        sec = bfd_make_section (stdoutput, ".arm.atpcs");

        if (sec != NULL)
          {
            bfd_set_section_flags
              (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
            bfd_set_section_size (stdoutput, sec, 0);
            bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
          }
      }
  }
#endif
  /* Record the CPU type as well.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
        mach = bfd_mach_arm_5T;
      else
        mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
        mach = bfd_mach_arm_4T;
      else
        mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

              -mcpu=<cpu name>           Assemble for selected processor
              -march=<architecture name> Assemble for selected architecture
              -mfpu=<fpu architecture>   Assemble for selected FPU.
              -EB/-mbig-endian           Big-endian
              -EL/-mlittle-endian        Little-endian
              -k                         Generate PIC code
              -mthumb                    Start in Thumb mode
              -mthumb-interwork          Code supports ARM/Thumb interworking

              -m[no-]warn-deprecated     Warn about deprecated features

      For now we will also provide support for:

              -mapcs-32                  32-bit Program counter
              -mapcs-26                  26-bit Program counter
              -mapcs-float               Floats passed in FP registers
              -mapcs-reentrant           Reentrant code
              -matpcs
      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for backwards compatibility.
      Cpu variants, the arm part is optional:
              -m[arm]1                Currently not supported.
              -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
              -m[arm]3                Arm 3 processor
              -m[arm]6[xx],           Arm 6 processors
              -m[arm]7[xx][t][[d]m]   Arm 7 processors
              -m[arm]8[10]            Arm 8 processors
              -m[arm]9[20][tdmi]      Arm 9 processors
              -mstrongarm[110[0]]     StrongARM processors
              -mxscale                XScale processors
              -m[arm]v[2345[t[e]]]    Arm architectures
              -mall                   All (except the ARM1)
      FP variants:
              -mfpa10, -mfpa11        FPA10 and 11 co-processor instructions
              -mfpe-old               (No float load/store multiples)
              -mvfpxd                 VFP Single precision
              -mno-fpu                Disable all floating point instructions

      The following CPU names are recognized:
              arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
              arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
              arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
              arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
              arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
              arm10t arm10e, arm1020t, arm1020e, arm10200e,
              strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
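
/* Illustrative invocations (not part of the original source) using the
   new-style options described above; the file names are placeholders:

       as -mcpu=arm7tdmi -mthumb-interwork -o foo.o foo.s
       as -march=armv5te -mfpu=vfpv2 -EL -o bar.o bar.s

   Legacy spellings such as -m7tdmi are still accepted, but they map onto
   the equivalent -mcpu=/-march= setting and may draw a deprecation
   warning.  */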
const char * md_shortopts = "m:k";

#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)

struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
struct arm_option_table
{
  char *option;		/* Option name to match.  */
  char *help;		/* Help information.  */
  int  *var;		/* Variable to change.  */
  int   value;		/* What to change it to.  */
  char *deprecated;	/* If non-null, print this message.  */
};

struct arm_option_table arm_opts[] =
{
  {"k",      N_("generate PIC code"),   &pic_code,   1, NULL},
  {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
   &support_interwork, 1, NULL},
  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
   1, NULL},
  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
   NULL},

  /* These are recognized by the assembler, but have no effect on code.  */
  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},

  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
   &warn_on_deprecated, 0, NULL},
  {NULL, NULL, NULL, 0, NULL}
};
struct arm_legacy_option_table
{
  char *option;				/* Option name to match.  */
  const arm_feature_set **var;		/* Variable to change.  */
  const arm_feature_set value;		/* What to change it to.  */
  char *deprecated;			/* If non-null, print this message.  */
};

const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
  {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
  {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
  {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
  {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
  {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
  {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
  {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
  {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
  {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
  {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
  {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
  {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
  {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
  {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
  {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
  {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
  {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
  {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
  {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
  {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
  {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
  {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
  {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.  */
  {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
struct arm_cpu_option_table
{
  char *name;
  const arm_feature_set value;
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...  */
  const arm_feature_set default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *canonical_name;
};

/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
static const struct arm_cpu_option_table arm_cpus[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
  {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
  {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
  {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
  {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
  {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
  {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
  {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
  {"fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  {"fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
  /* For V5 or later processors we default to using VFP; but the user
     should really set the FPU type explicitly.  */
  {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
  {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
  {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
  {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
  {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
  {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
  {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
  {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
  {"fa626te", ARM_ARCH_V5TE, FPU_NONE, NULL},
  {"fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
  {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
  {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
  {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
  {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
  {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
  {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
  {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
  {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
  {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
  {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
  {"cortex-a5", ARM_ARCH_V7A, FPU_NONE, NULL},
  {"cortex-a8", ARM_ARCH_V7A,
   ARM_FEATURE (0, FPU_VFP_V3 | FPU_NEON_EXT_V1), NULL},
  {"cortex-a9", ARM_ARCH_V7A,
   ARM_FEATURE (0, FPU_VFP_V3 | FPU_NEON_EXT_V1), NULL},
  {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
  {"cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16, NULL},
  {"cortex-m4", ARM_ARCH_V7EM, FPU_NONE, NULL},
  {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
  {"cortex-m1", ARM_ARCH_V6M, FPU_NONE, NULL},
  {"cortex-m0", ARM_ARCH_V6M, FPU_NONE, NULL},
  /* ??? XSCALE is really an architecture.  */
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  /* ??? iwmmxt is not a processor.  */
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
  {"iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP_V2, NULL},
  {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
  {"ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
};
struct arm_arch_option_table
{
  char *name;
  const arm_feature_set value;
  const arm_feature_set default_fpu;
};

/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
static const struct arm_arch_option_table arm_archs[] =
{
  {"all", ARM_ANY, FPU_ARCH_FPA},
  {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
  {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
  {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
  {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
  {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
  {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
  {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
  {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
  {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
  {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
  {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
  {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
  {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
  {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
  {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
  {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
  {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
  {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
  {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
  {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
  {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
  {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
  {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
  {"armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP},
  {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP},
  {"armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP},
  {"armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP},
  {"armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP},
  {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
  {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
  {"iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP},
  {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
};
/* ISA extensions in the co-processor space.  */
struct arm_option_cpu_value_table
{
  char *name;
  const arm_feature_set value;
};

static const struct arm_option_cpu_value_table arm_extensions[] =
{
  {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
  {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
  {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
  {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
  {NULL, ARM_ARCH_NONE}
};
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
static const struct arm_option_cpu_value_table arm_fpus[] =
{
  {"softfpa", FPU_NONE},
  {"fpe", FPU_ARCH_FPE},
  {"fpe2", FPU_ARCH_FPE},
  {"fpe3", FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa", FPU_ARCH_FPA},
  {"fpa10", FPU_ARCH_FPA},
  {"fpa11", FPU_ARCH_FPA},
  {"arm7500fe", FPU_ARCH_FPA},
  {"softvfp", FPU_ARCH_VFP},
  {"softvfp+vfp", FPU_ARCH_VFP_V2},
  {"vfp", FPU_ARCH_VFP_V2},
  {"vfp9", FPU_ARCH_VFP_V2},
  {"vfp3", FPU_ARCH_VFP_V3},	/* For backwards compatibility.  */
  {"vfp10", FPU_ARCH_VFP_V2},
  {"vfp10-r0", FPU_ARCH_VFP_V1},
  {"vfpxd", FPU_ARCH_VFP_V1xD},
  {"vfpv2", FPU_ARCH_VFP_V2},
  {"vfpv3", FPU_ARCH_VFP_V3},
  {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd", FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t", FPU_ARCH_VFP_V1},
  {"arm1020e", FPU_ARCH_VFP_V2},
  {"arm1136jfs", FPU_ARCH_VFP_V2},
  {"arm1136jf-s", FPU_ARCH_VFP_V2},
  {"maverick", FPU_ARCH_MAVERICK},
  {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16", FPU_ARCH_NEON_FP16},
  {"vfpv4", FPU_ARCH_VFP_V4},
  {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
  {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
  {NULL, ARM_ARCH_NONE}
};
struct arm_option_value_table
{
  char *name;
  long value;
};

static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard", ARM_FLOAT_ABI_HARD},
  {"softfp", ARM_FLOAT_ABI_SOFTFP},
  {"soft", ARM_FLOAT_ABI_SOFT},
  {NULL, 0}
};

#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu", EF_ARM_EABI_UNKNOWN},
  {"4", EF_ARM_EABI_VER4},
  {"5", EF_ARM_EABI_VER5},
  {NULL, 0}
};
#endif
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
static bfd_boolean
arm_parse_extension (char * str, const arm_feature_set **opt_p)
{
  arm_feature_set *ext_set = (arm_feature_set *)
      xmalloc (sizeof (arm_feature_set));

  /* Copy the feature set, so that we can modify it.  */
  *ext_set = **opt_p;
  *opt_p = ext_set;

  while (str != NULL && *str != 0)
    {
      const struct arm_option_cpu_value_table * opt;
      char * ext;
      int optlen;

      if (*str != '+')
        {
          as_bad (_("invalid architectural extension"));
          return FALSE;
        }

      str++;
      ext = strchr (str, '+');

      if (ext != NULL)
        optlen = ext - str;
      else
        optlen = strlen (str);

      if (optlen == 0)
        {
          as_bad (_("missing architectural extension"));
          return FALSE;
        }

      for (opt = arm_extensions; opt->name != NULL; opt++)
        if (strncmp (opt->name, str, optlen) == 0)
          {
            ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
            break;
          }

      if (opt->name == NULL)
        {
          as_bad (_("unknown architectural extension `%s'"), str);
          return FALSE;
        }

      str = ext;
    }

  return TRUE;
}
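
/* Illustrative example (not part of the original source): a command line
   such as

       -mcpu=xscale+iwmmxt

   reaches this function with STR pointing at "+iwmmxt"; each "+"-prefixed
   name is looked up in arm_extensions and merged into a private copy of
   the base CPU's feature set.  */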
static bfd_boolean
arm_parse_cpu (char * str)
{
  const struct arm_cpu_option_table * opt;
  char * ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (strncmp (opt->name, str, optlen) == 0)
      {
        mcpu_cpu_opt = &opt->value;
        mcpu_fpu_opt = &opt->default_fpu;
        if (opt->canonical_name)
          strcpy (selected_cpu_name, opt->canonical_name);
        else
          {
            int i;

            for (i = 0; i < optlen; i++)
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
            selected_cpu_name[i] = 0;
          }

        if (ext != NULL)
          return arm_parse_extension (ext, &mcpu_cpu_opt);

        return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
static bfd_boolean
arm_parse_arch (char * str)
{
  const struct arm_arch_option_table *opt;
  char *ext = strchr (str, '+');
  int optlen;

  if (ext != NULL)
    optlen = ext - str;
  else
    optlen = strlen (str);

  if (optlen == 0)
    {
      as_bad (_("missing architecture name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_archs; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
        march_cpu_opt = &opt->value;
        march_fpu_opt = &opt->default_fpu;
        strcpy (selected_cpu_name, opt->name);

        if (ext != NULL)
          return arm_parse_extension (ext, &march_cpu_opt);

        return TRUE;
      }

  as_bad (_("unknown architecture `%s'\n"), str);
  return FALSE;
}
static bfd_boolean
arm_parse_fpu (char * str)
{
  const struct arm_option_cpu_value_table * opt;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
        mfpu_opt = &opt->value;
        return TRUE;
      }

  as_bad (_("unknown floating point format `%s'\n"), str);
  return FALSE;
}
static bfd_boolean
arm_parse_float_abi (char * str)
{
  const struct arm_option_value_table * opt;

  for (opt = arm_float_abis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
        mfloat_abi_opt = opt->value;
        return TRUE;
      }

  as_bad (_("unknown floating point abi `%s'\n"), str);
  return FALSE;
}
#ifdef OBJ_ELF
static bfd_boolean
arm_parse_eabi (char * str)
{
  const struct arm_option_value_table *opt;

  for (opt = arm_eabis; opt->name != NULL; opt++)
    if (streq (opt->name, str))
      {
        meabi_flags = opt->value;
        return TRUE;
      }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
static bfd_boolean
arm_parse_it_mode (char * str)
{
  bfd_boolean ret = TRUE;

  if (streq ("arm", str))
    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
  else if (streq ("thumb", str))
    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
  else if (streq ("always", str))
    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
  else if (streq ("never", str))
    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
  else
    {
      as_bad (_("unknown implicit IT mode `%s', should be "\
                "arm, thumb, always, or never."), str);
      ret = FALSE;
    }

  return ret;
}
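
/* Illustrative example (not part of the original source): with
   -mimplicit-it=thumb, a conditional instruction written without an
   explicit IT block, e.g.

       .syntax unified
       .thumb
       addeq  r0, r0, #1    @ an IT EQ block is inserted implicitly

   is accepted when assembling Thumb code, whereas stricter settings
   diagnose the missing IT instruction.  */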
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {NULL, NULL, 0, NULL}
};
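
/* Illustrative example (not part of the original source): an argument such
   as "-mfloat-abi=hard" is matched against the "mfloat-abi=" entry above by
   prefix, and the sub-option parser is then called with the text after the
   '=' sign, i.e. arm_parse_float_abi ("hard").  */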
int
md_parse_option (int c, char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
         ones.  */
      return 0;

    default:
      for (opt = arm_opts; opt->option != NULL; opt++)
        {
          if (c == opt->option[0]
              && ((arg == NULL && opt->option[1] == 0)
                  || streq (arg, opt->option + 1)))
            {
              /* If the option is deprecated, tell the user.  */
              if (warn_on_deprecated && opt->deprecated != NULL)
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
                           arg ? arg : "", _(opt->deprecated));

              if (opt->var != NULL)
                *opt->var = opt->value;

              return 1;
            }
        }

      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
        {
          if (c == fopt->option[0]
              && ((arg == NULL && fopt->option[1] == 0)
                  || streq (arg, fopt->option + 1)))
            {
              /* If the option is deprecated, tell the user.  */
              if (warn_on_deprecated && fopt->deprecated != NULL)
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
                           arg ? arg : "", _(fopt->deprecated));

              if (fopt->var != NULL)
                *fopt->var = &fopt->value;

              return 1;
            }
        }

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
        {
          /* These options are expected to have an argument.  */
          if (c == lopt->option[0]
              && arg != NULL
              && strncmp (arg, lopt->option + 1,
                          strlen (lopt->option + 1)) == 0)
            {
              /* If the option is deprecated, tell the user.  */
              if (warn_on_deprecated && lopt->deprecated != NULL)
                as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
                           _(lopt->deprecated));

              /* Call the sub-option parser.  */
              return lopt->func (arg + strlen (lopt->option) - 1);
            }
        }

      return 0;
    }

  return 1;
}
void
md_show_usage (FILE * fp)
{
  struct arm_option_table *opt;
  struct arm_long_option_table *lopt;

  fprintf (fp, _(" ARM-specific assembler options:\n"));

  for (opt = arm_opts; opt->option != NULL; opt++)
    if (opt->help != NULL)
      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));

  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
    if (lopt->help != NULL)
      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));

#ifdef OPTION_EB
  fprintf (fp, _("\
  -EB                     assemble code for a big-endian cpu\n"));
#endif

#ifdef OPTION_EL
  fprintf (fp, _("\
  -EL                     assemble code for a little-endian cpu\n"));
#endif

  fprintf (fp, _("\
  --fix-v4bx              Allow BX in ARMv4 code\n"));
}
#ifdef OBJ_ELF
typedef struct
{
  int val;
  arm_feature_set flags;
} cpu_arch_ver_table;

/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   least features first.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {4, ARM_ARCH_V5TE},
    {5, ARM_ARCH_V5TEJ},
    {11, ARM_ARCH_V6M},
    {8, ARM_ARCH_V6T2},
    {10, ARM_ARCH_V7A},
    {10, ARM_ARCH_V7R},
    {10, ARM_ARCH_V7M},
    {0, ARM_ARCH_NONE}
};
/* Set an attribute if it has not already been set by the user.  */
static void
aeabi_set_attribute_int (int tag, int value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
}

static void
aeabi_set_attribute_string (int tag, const char *value)
{
  if (tag < 1
      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
      || !attributes_set_explicitly[tag])
    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
}
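
/* Illustrative example (not part of the original source): if the user has
   written an explicit attribute directive, e.g.

       .eabi_attribute Tag_CPU_arch, 10

   then attributes_set_explicitly[Tag_CPU_arch] is set and the helpers above
   leave that tag alone instead of overwriting it with the inferred
   default.  */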
/* Set the public EABI object attributes.  */
static void
aeabi_set_public_attributes (void)
{
  int arch;
  arm_feature_set flags;
  arm_feature_set tmp;
  const cpu_arch_ver_table *p;

  /* Choose the architecture based on the capabilities of the requested cpu
     (if any) and/or the instructions actually used.  */
  ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
  ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
  /* Allow the user to override the reported architecture.  */
  if (object_arch)
    {
      ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
      ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
    }

  tmp = flags;
  arch = 0;
  for (p = cpu_arch_ver; p->val; p++)
    {
      if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
        {
          arch = p->val;
          ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
        }
    }

  /* The table lookup above finds the last architecture to contribute
     a new feature.  Unfortunately, Tag13 is a subset of the union of
     v6T2 and v7-M, so it is never seen as contributing a new feature.
     We can not search for the last entry which is entirely used,
     because if no CPU is specified we build up only those flags
     actually used.  Perhaps we should separate out the specified
     and implicit cases.  Avoid taking this path for -march=all by
     checking for contradictory v7-A / v7-M features.  */
  if (arch == 10
      && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
      && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
    arch = 13;

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      if (strncmp (q, "armv", 4) == 0)
        {
          int i;

          q += 4;
          for (i = 0; q[i]; i++)
            q[i] = TOUPPER (q[i]);
        }
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'A');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'R');
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
    aeabi_set_attribute_int (Tag_CPU_arch_profile, 'M');

  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || arch == 0)
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || arch == 0)
    aeabi_set_attribute_int (Tag_THUMB_ISA_use,
                             ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);

  /* Tag_VFP_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
                             ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
                             ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    aeabi_set_attribute_int (Tag_VFP_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
           || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    aeabi_set_attribute_int
      (Tag_Advanced_SIMD_arch, (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma)
                                ? 2 : 1));

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16))
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
}
/* Add the default contents for the .ARM.attributes section.  */
void
arm_md_end (void)
{
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
    return;

  aeabi_set_public_attributes ();
}
#endif /* OBJ_ELF */
/* Parse a .cpu directive.  */

static void
s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_cpu_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
        mcpu_cpu_opt = &opt->value;
        selected_cpu = opt->value;
        if (opt->canonical_name)
          strcpy (selected_cpu_name, opt->canonical_name);
        else
          {
            int i;

            for (i = 0; opt->name[i]; i++)
              selected_cpu_name[i] = TOUPPER (opt->name[i]);
            selected_cpu_name[i] = 0;
          }
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
        *input_line_pointer = saved_char;
        demand_empty_rest_of_line ();
        return;
      }

  as_bad (_("unknown cpu `%s'"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .arch directive.  */

static void
s_arm_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
        mcpu_cpu_opt = &opt->value;
        selected_cpu = opt->value;
        strcpy (selected_cpu_name, opt->name);
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
        *input_line_pointer = saved_char;
        demand_empty_rest_of_line ();
        return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .object_arch directive.  */

static void
s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_arch_option_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  /* Skip the first "all" entry.  */
  for (opt = arm_archs + 1; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
        object_arch = &opt->value;
        *input_line_pointer = saved_char;
        demand_empty_rest_of_line ();
        return;
      }

  as_bad (_("unknown architecture `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Parse a .fpu directive.  */

static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_cpu_value_table *opt;
  char *name;
  char saved_char;

  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
        mfpu_opt = &opt->value;
        ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
        *input_line_pointer = saved_char;
        demand_empty_rest_of_line ();
        return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
/* Copy symbol information.  */

void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
#ifdef OBJ_ELF
/* Given a symbolic attribute NAME, return the proper integer value.
   Returns -1 if the attribute is not known.  */

int
arm_convert_symbolic_attribute (const char *name)
{
  static const struct
  {
    const char * name;
    const int    tag;
  }
  attribute_table[] =
    {
      /* When you modify this table you should
         also modify the list in doc/c-arm.texi.  */
#define T(tag) {#tag, tag}
      T (Tag_CPU_raw_name),
      T (Tag_CPU_name),
      T (Tag_CPU_arch),
      T (Tag_CPU_arch_profile),
      T (Tag_ARM_ISA_use),
      T (Tag_THUMB_ISA_use),
      T (Tag_VFP_arch),
      T (Tag_WMMX_arch),
      T (Tag_Advanced_SIMD_arch),
      T (Tag_PCS_config),
      T (Tag_ABI_PCS_R9_use),
      T (Tag_ABI_PCS_RW_data),
      T (Tag_ABI_PCS_RO_data),
      T (Tag_ABI_PCS_GOT_use),
      T (Tag_ABI_PCS_wchar_t),
      T (Tag_ABI_FP_rounding),
      T (Tag_ABI_FP_denormal),
      T (Tag_ABI_FP_exceptions),
      T (Tag_ABI_FP_user_exceptions),
      T (Tag_ABI_FP_number_model),
      T (Tag_ABI_align8_needed),
      T (Tag_ABI_align8_preserved),
      T (Tag_ABI_enum_size),
      T (Tag_ABI_HardFP_use),
      T (Tag_ABI_VFP_args),
      T (Tag_ABI_WMMX_args),
      T (Tag_ABI_optimization_goals),
      T (Tag_ABI_FP_optimization_goals),
      T (Tag_compatibility),
      T (Tag_CPU_unaligned_access),
      T (Tag_VFP_HP_extension),
      T (Tag_ABI_FP_16bit_format),
      T (Tag_MPextension_use),
      T (Tag_nodefaults),
      T (Tag_also_compatible_with),
      T (Tag_conformance),
      T (Tag_Virtualization_use),
      /* We deliberately do not include Tag_MPextension_use_legacy.  */
#undef T
    };
  unsigned int i;

  if (name == NULL)
    return -1;

  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
    if (streq (name, attribute_table[i].name))
      return attribute_table[i].tag;

  return -1;
}
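
/* Illustrative example (not part of the original source): the table above
   lets attribute directives use symbolic tag names, so

       .eabi_attribute Tag_ABI_FP_denormal, 1

   is converted here to the numeric tag before the attribute is recorded;
   an unrecognised name makes this function return -1.  */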
/* Apply sym value for relocations only in the case that
   they are for local symbols and you have the respective
   architectural feature for blx and simple switches.  */
int
arm_apply_sym_value (struct fix * fixP)
{
  if (fixP->fx_addsy
      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
      && !S_IS_EXTERNAL (fixP->fx_addsy))
    {
      switch (fixP->fx_r_type)
        {
        case BFD_RELOC_ARM_PCREL_BLX:
        case BFD_RELOC_THUMB_PCREL_BRANCH23:
          if (ARM_IS_FUNC (fixP->fx_addsy))
            return 1;
          break;

        case BFD_RELOC_ARM_PCREL_CALL:
        case BFD_RELOC_THUMB_PCREL_BLX:
          if (THUMB_IS_FUNC (fixP->fx_addsy))
            return 1;
          break;

        default:
          break;
        }
    }

  return 0;
}
#endif /* OBJ_ELF */