1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
/* String equality test, used throughout option and operand parsing.  */
#define streq(a, b) (strcmp (a, b) == 0)

/* Current set of feature bits available (CPU+FPU).  Different from
   selected_cpu + selected_fpu in case of autodetection since the CPU
   feature bits are then all set.  */
static arm_feature_set cpu_variant;

/* Feature bits used in each execution state.  Used to set build attribute
   (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  Names mirror the
   corresponding command-line options (presumably set during option
   parsing — confirm against md_parse_option).  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */

/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
   instead of -mcpu=arm1).  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

/* CPU, extension and FPU feature bits selected by -mcpu.  */
static const arm_feature_set *mcpu_cpu_opt = NULL;
static arm_feature_set *mcpu_ext_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;

/* CPU, extension and FPU feature bits selected by -march.  */
static const arm_feature_set *march_cpu_opt = NULL;
static arm_feature_set *march_ext_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;

/* Feature bits selected by -mfpu.  */
static const arm_feature_set *mfpu_opt = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

/* Default CPU feature set (CPU_DEFAULT is target-configuration defined).  */
static const arm_feature_set cpu_default = CPU_DEFAULT;
/* Per-architecture-version core feature sets, used to gate which
   instructions a given -mcpu/-march selection may assemble.  */
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
static const arm_feature_set arm_ext_v6k_v6t2 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v8_1m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures.  */
static const arm_feature_set arm_ext_v8m_m_only =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
/* DSP instructions Tag_DSP_extension refers to.  */
static const arm_feature_set arm_ext_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_ras =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
/* FP16 instructions.  */
static const arm_feature_set arm_ext_fp16 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
static const arm_feature_set arm_ext_fp16_fml =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
static const arm_feature_set arm_ext_v8_2 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
static const arm_feature_set arm_ext_v8_3 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
static const arm_feature_set arm_ext_sb =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
static const arm_feature_set arm_ext_predres =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);

/* Catch-all feature sets.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set fpu_any = FPU_ANY;
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

/* Coprocessor and FPU extension feature sets.  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set mve_ext =
  ARM_FEATURE_COPROC (FPU_MVE);
static const arm_feature_set mve_fp_ext =
  ARM_FEATURE_COPROC (FPU_MVE_FP);
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
static const arm_feature_set fpu_neon_ext_dotprod =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
/* Float ABI selected on the command line; -1 means "not specified".  */
static int mfloat_abi_opt = -1;

/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
   directive.  */
static arm_feature_set selected_arch = ARM_ARCH_NONE;
/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
   directive.  */
static arm_feature_set selected_ext = ARM_ARCH_NONE;
/* Feature bits selected by the last -mcpu/-march or by the combination of the
   last .cpu/.arch directive .arch_extension directives since that
   directive.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* FPU feature bits selected by the last -mfpu or .fpu directive.  */
static arm_feature_set selected_fpu = FPU_NONE;
/* Feature bits selected by the last .object_arch directive.  */
static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

/* Scratch floating-point parse result shared with the generic FP code.  */
extern FLONUM_TYPE generic_floating_point_number;
352 /* Return if no cpu was selected on command-line. */
354 no_cpu_selected (void)
356 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
361 static int meabi_flags
= EABI_DEFAULT
;
363 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
/* Nonzero entries mark object attributes that were set explicitly
   (name suggests via directives/options rather than deduced — confirm
   against the attribute-setting code).  Indexed by attribute tag.  */
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
371 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
393 IMPLICIT_IT_MODE_NEVER
= 0x00,
394 IMPLICIT_IT_MODE_ARM
= 0x01,
395 IMPLICIT_IT_MODE_THUMB
= 0x02,
396 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
/* Current implicit-IT behaviour; one of the IMPLICIT_IT_MODE_* values.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

     - Immediate operands do not require a # prefix.
     - Conditional affixes always appear at the end of the
       instruction.  (For backward compatibility, those instructions
       that formerly had them in the middle, continue to accept them
       there as well.)
     - The IT instruction may appear, and if it does is validated
       against subsequent conditional affixes.  It does not generate
       machine code.

   Important differences from the old Thumb mode:

     - Immediate operands do not require a # prefix.
     - Most of the V6T2 instructions are only available in unified mode.
     - The .N and .W suffixes are recognized and honored (it is an error
       if they cannot be honored).
     - All instructions set the flags if and only if they have an 's' affix.
     - Conditional affixes may be used.  They are validated against
       preceding IT instructions.  Unlike ARM mode, you cannot use a
       conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;

/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
444 enum neon_el_type type
;
448 #define NEON_MAX_TYPE_ELS 4
452 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
456 enum pred_instruction_type
462 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN
, /* The IT insn has been parsed. */
467 VPT_INSN
, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN
, /* Instruction to indicate a MVE instruction without
469 a predication code. */
470 MVE_UNPREDICABLE_INSN
/* MVE instruction that is non-predicable. */
/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
/* The maximum number of per-instruction relocations we track.  */
#define ARM_IT_MAX_RELOCS 3
480 unsigned long instruction
;
484 /* "uncond_value" is set to the value in place of the conditional field in
485 unconditional versions of the instruction, or -1 if nothing is
488 struct neon_type vectype
;
489 /* This does not indicate an actual NEON instruction, only that
490 the mnemonic accepts neon-style type suffixes. */
492 /* Set to the opcode if the instruction needs relaxation.
493 Zero if the instruction is not relaxed. */
497 bfd_reloc_code_real_type type
;
500 } relocs
[ARM_IT_MAX_RELOCS
];
502 enum pred_instruction_type pred_insn_type
;
508 struct neon_type_el vectype
;
509 unsigned present
: 1; /* Operand present. */
510 unsigned isreg
: 1; /* Operand was a register. */
511 unsigned immisreg
: 2; /* .imm field is a second register.
512 0: imm, 1: gpr, 2: MVE Q-register. */
513 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
514 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
515 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
516 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
517 instructions. This allows us to disambiguate ARM <-> vector insns. */
518 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
519 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
520 unsigned isquad
: 1; /* Operand is SIMD quad register. */
521 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
522 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
523 unsigned writeback
: 1; /* Operand has trailing ! */
524 unsigned preind
: 1; /* Preindexed address. */
525 unsigned postind
: 1; /* Postindexed address. */
526 unsigned negative
: 1; /* Index register was negated. */
527 unsigned shifted
: 1; /* Shift applied to operation. */
528 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
529 } operands
[ARM_IT_MAX_OPERANDS
];
532 static struct arm_it inst
;
/* Number of predefined floating-point constants (see fp_const nearby).  */
#define NUM_FLOAT_VALS 8
536 const char * fp_const
[] =
538 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
/* Littlenum buffers, one per fp_const entry (presumably filled when the
   constants are first parsed — confirm in the initialisation code).  */
LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
/* Instruction-encoding bit masks (values preserved from the original
   source; exact field meanings are defined by the ARM encodings).  */
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
561 const char * template_name
;
565 #define COND_ALWAYS 0xE
569 const char * template_name
;
573 struct asm_barrier_opt
575 const char * template_name
;
577 const arm_feature_set arch
;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
592 bfd_reloc_code_real_type reloc
;
597 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
598 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
603 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2
610 struct neon_typed_alias
612 unsigned char defined
;
614 struct neon_type_el eltype
;
617 /* ARM register categories. This includes coprocessor numbers and various
618 architecture extensions' registers. Each entry should have an error message
619 in reg_expected_msgs below. */
648 /* Structure for a hash table entry for a register.
649 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
650 information which states whether a vector type or index is specified (for a
651 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
657 unsigned char builtin
;
658 struct neon_typed_alias
* neon
;
661 /* Diagnostics used when we don't get a register of the expected type. */
662 const char * const reg_expected_msgs
[] =
664 [REG_TYPE_RN
] = N_("ARM register expected"),
665 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
666 [REG_TYPE_CN
] = N_("co-processor register expected"),
667 [REG_TYPE_FN
] = N_("FPA register expected"),
668 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
669 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
670 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
671 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
672 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
673 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
674 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
676 [REG_TYPE_VFC
] = N_("VFP system register expected"),
677 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
678 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
679 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
680 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
681 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
682 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
683 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
684 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
685 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
686 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
687 [REG_TYPE_MQ
] = N_("MVE vector register expected"),
688 [REG_TYPE_RNB
] = N_("")
691 /* Some well known registers that we refer to directly elsewhere. */
697 /* ARM instructions take 4bytes in the object file, Thumb instructions
703 /* Basic string to match. */
704 const char * template_name
;
706 /* Parameters to instruction. */
707 unsigned int operands
[8];
709 /* Conditional tag - see opcode_lookup. */
710 unsigned int tag
: 4;
712 /* Basic instruction code. */
715 /* Thumb-format instruction code. */
718 /* Which architecture variant provides this instruction. */
719 const arm_feature_set
* avariant
;
720 const arm_feature_set
* tvariant
;
722 /* Function to call to encode instruction in ARM format. */
723 void (* aencode
) (void);
725 /* Function to call to encode instruction in Thumb format. */
726 void (* tencode
) (void);
728 /* Indicates whether this instruction may be vector predicated. */
729 unsigned int mayBeVecPred
: 1;
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode field values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic strings (translatable via _()) used by the operand/encode
   code; the exact message text must not change.  */
#define BAD_SYNTAX	_("syntax error")
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_ODD		_("Odd register not allowed here")
#define BAD_EVEN	_("Even register not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT	\
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY	_("relocation valid in thumb1 code only")
883 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
885 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
887 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
889 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
/* Further diagnostic strings; message text must not change.  */
#define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
#define BAD_MVE_AUTO  \
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE	_("bad element type for instruction")
/* Lookup tables, one per namespace, keyed as the names indicate
   (mnemonics, condition codes, shift names, PSR names, register names,
   relocation names, barrier options).  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_vcond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity:
   a label immediately followed by an instruction on the same line may
   need different handling from a label on its own line.  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;
921 /* Literal pool structure. Held on a per-section
922 and per-sub-section basis. */
924 #define MAX_LITERAL_POOL_SIZE 1024
925 typedef struct literal_pool
927 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
928 unsigned int next_free_entry
;
934 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
936 struct literal_pool
* next
;
937 unsigned int alignment
;
940 /* Pointer to a linked list of literal pools. */
/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
943 typedef enum asmfunc_states
946 WAITING_ASMFUNC_NAME
,
/* Current state of the asmfunc handling (names suggest the CodeComposer
   .asmfunc/.endasmfunc directives — confirm at the directive handlers).  */
static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
953 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
955 static struct current_pred now_pred
;
959 now_pred_compatible (int cond
)
961 return (cond
& ~1) == (now_pred
.cc
& ~1);
965 conditional_insn (void)
967 return inst
.cond
!= COND_ALWAYS
;
970 static int in_pred_block (void);
972 static int handle_pred_state (void);
974 static void force_automatic_it_block_close (void);
976 static void it_fsm_post_encode (void);
978 #define set_pred_insn_type(type) \
981 inst.pred_insn_type = type; \
982 if (handle_pred_state () == FAIL) \
987 #define set_pred_insn_type_nonvoid(type, failret) \
990 inst.pred_insn_type = type; \
991 if (handle_pred_state () == FAIL) \
996 #define set_pred_insn_type_last() \
999 if (inst.cond == COND_ALWAYS) \
1000 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1002 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
1033 /* Prefix characters that indicate the start of an immediate
1035 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
1037 /* Separator character handling. */
1039 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1042 skip_past_char (char ** str
, char c
)
1044 /* PR gas/14987: Allow for whitespace before the expected character. */
1045 skip_whitespace (*str
);
1056 #define skip_past_comma(str) skip_past_char (str, ',')
1058 /* Arithmetic expressions (possibly involving symbols). */
1060 /* Return TRUE if anything in the expression is a bignum. */
1063 walk_no_bignums (symbolS
* sp
)
1065 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1068 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1070 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1071 || (symbol_get_value_expression (sp
)->X_op_symbol
1072 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1078 static bfd_boolean in_my_get_expression
= FALSE
;
1080 /* Third argument to my_get_expression. */
1081 #define GE_NO_PREFIX 0
1082 #define GE_IMM_PREFIX 1
1083 #define GE_OPT_PREFIX 2
1084 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1085 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1086 #define GE_OPT_PREFIX_BIG 3
1089 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1093 /* In unified syntax, all prefixes are optional. */
1095 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1098 switch (prefix_mode
)
1100 case GE_NO_PREFIX
: break;
1102 if (!is_immediate_prefix (**str
))
1104 inst
.error
= _("immediate expression requires a # prefix");
1110 case GE_OPT_PREFIX_BIG
:
1111 if (is_immediate_prefix (**str
))
1118 memset (ep
, 0, sizeof (expressionS
));
1120 save_in
= input_line_pointer
;
1121 input_line_pointer
= *str
;
1122 in_my_get_expression
= TRUE
;
1124 in_my_get_expression
= FALSE
;
1126 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1128 /* We found a bad or missing expression in md_operand(). */
1129 *str
= input_line_pointer
;
1130 input_line_pointer
= save_in
;
1131 if (inst
.error
== NULL
)
1132 inst
.error
= (ep
->X_op
== O_absent
1133 ? _("missing expression") :_("bad expression"));
1137 /* Get rid of any bignums now, so that we don't generate an error for which
1138 we can't establish a line number later on. Big numbers are never valid
1139 in instructions, which is where this routine is always called. */
1140 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1141 && (ep
->X_op
== O_big
1142 || (ep
->X_add_symbol
1143 && (walk_no_bignums (ep
->X_add_symbol
)
1145 && walk_no_bignums (ep
->X_op_symbol
))))))
1147 inst
.error
= _("invalid constant");
1148 *str
= input_line_pointer
;
1149 input_line_pointer
= save_in
;
1153 *str
= input_line_pointer
;
1154 input_line_pointer
= save_in
;
1158 /* Turn a string in input_line_pointer into a floating point constant
1159 of type TYPE, and store the appropriate bytes in *LITP. The number
1160 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1161 returned, or NULL on OK.
1163 Note that fp constants aren't represent in the normal way on the ARM.
1164 In big endian mode, things are as expected. However, in little endian
1165 mode fp constants are big-endian word-wise, and little-endian byte-wise
1166 within the words. For example, (double) 1.1 in big endian mode is
1167 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1168 the byte sequence 99 99 f1 3f 9a 99 99 99.
1170 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1173 md_atof (int type
, char * litP
, int * sizeP
)
1176 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1208 return _("Unrecognized or unsupported floating point constant");
1211 t
= atof_ieee (input_line_pointer
, type
, words
);
1213 input_line_pointer
= t
;
1214 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1216 if (target_big_endian
)
1218 for (i
= 0; i
< prec
; i
++)
1220 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1221 litP
+= sizeof (LITTLENUM_TYPE
);
1226 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1227 for (i
= prec
- 1; i
>= 0; i
--)
1229 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1230 litP
+= sizeof (LITTLENUM_TYPE
);
1233 /* For a 4 byte float the order of elements in `words' is 1 0.
1234 For an 8 byte float the order is 1 0 3 2. */
1235 for (i
= 0; i
< prec
; i
+= 2)
1237 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1238 sizeof (LITTLENUM_TYPE
));
1239 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1240 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1241 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1248 /* We handle all bad expressions here, so that we can report the faulty
1249 instruction in the error message. */
1252 md_operand (expressionS
* exp
)
1254 if (in_my_get_expression
)
1255 exp
->X_op
= O_illegal
;
1258 /* Immediate values. */
1261 /* Generic immediate-value read function for use in directives.
1262 Accepts anything that 'expression' can fold to a constant.
1263 *val receives the number. */
1266 immediate_for_directive (int *val
)
1269 exp
.X_op
= O_illegal
;
1271 if (is_immediate_prefix (*input_line_pointer
))
1273 input_line_pointer
++;
1277 if (exp
.X_op
!= O_constant
)
1279 as_bad (_("expected #constant"));
1280 ignore_rest_of_line ();
1283 *val
= exp
.X_add_number
;
1288 /* Register parsing. */
1290 /* Generic register parser. CCP points to what should be the
1291 beginning of a register name. If it is indeed a valid register
1292 name, advance CCP over it and return the reg_entry structure;
1293 otherwise return NULL. Does not issue diagnostics. */
1295 static struct reg_entry
*
1296 arm_reg_parse_multi (char **ccp
)
1300 struct reg_entry
*reg
;
1302 skip_whitespace (start
);
1304 #ifdef REGISTER_PREFIX
1305 if (*start
!= REGISTER_PREFIX
)
1309 #ifdef OPTIONAL_REGISTER_PREFIX
1310 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1315 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1320 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1322 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1332 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1333 enum arm_reg_type type
)
1335 /* Alternative syntaxes are accepted for a few register classes. */
1342 /* Generic coprocessor register names are allowed for these. */
1343 if (reg
&& reg
->type
== REG_TYPE_CN
)
1348 /* For backward compatibility, a bare number is valid here. */
1350 unsigned long processor
= strtoul (start
, ccp
, 10);
1351 if (*ccp
!= start
&& processor
<= 15)
1356 case REG_TYPE_MMXWC
:
1357 /* WC includes WCG. ??? I'm not sure this is true for all
1358 instructions that take WC registers. */
1359 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1370 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1371 return value is the register number or FAIL. */
1374 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1377 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1380 /* Do not allow a scalar (reg+index) to parse as a register. */
1381 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1384 if (reg
&& reg
->type
== type
)
1387 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1394 /* Parse a Neon type specifier. *STR should point at the leading '.'
1395 character. Does no verification at this stage that the type fits the opcode
1402 Can all be legally parsed by this function.
1404 Fills in neon_type struct pointer with parsed information, and updates STR
1405 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1406 type, FAIL if not. */
1409 parse_neon_type (struct neon_type
*type
, char **str
)
1416 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1418 enum neon_el_type thistype
= NT_untyped
;
1419 unsigned thissize
= -1u;
1426 /* Just a size without an explicit type. */
1430 switch (TOLOWER (*ptr
))
1432 case 'i': thistype
= NT_integer
; break;
1433 case 'f': thistype
= NT_float
; break;
1434 case 'p': thistype
= NT_poly
; break;
1435 case 's': thistype
= NT_signed
; break;
1436 case 'u': thistype
= NT_unsigned
; break;
1438 thistype
= NT_float
;
1443 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1449 /* .f is an abbreviation for .f32. */
1450 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1455 thissize
= strtoul (ptr
, &ptr
, 10);
1457 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1460 as_bad (_("bad size %d in type specifier"), thissize
);
1468 type
->el
[type
->elems
].type
= thistype
;
1469 type
->el
[type
->elems
].size
= thissize
;
1474 /* Empty/missing type is not a successful parse. */
1475 if (type
->elems
== 0)
1483 /* Errors may be set multiple times during parsing or bit encoding
1484 (particularly in the Neon bits), but usually the earliest error which is set
1485 will be the most meaningful. Avoid overwriting it with later (cascading)
1486 errors by calling this function. */
1489 first_error (const char *err
)
1495 /* Parse a single type, e.g. ".s32", leading period included. */
1497 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1500 struct neon_type optype
;
1504 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1506 if (optype
.elems
== 1)
1507 *vectype
= optype
.el
[0];
1510 first_error (_("only one type should be specified for operand"));
1516 first_error (_("vector type expected"));
1528 /* Special meanings for indices (which have a range of 0-7), which will fit into
1531 #define NEON_ALL_LANES 15
1532 #define NEON_INTERLEAVE_LANES 14
1534 /* Record a use of the given feature. */
1536 record_feature_use (const arm_feature_set
*feature
)
1539 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
1541 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
1544 /* If the given feature available in the selected CPU, mark it as used.
1545 Returns TRUE iff feature is available. */
1547 mark_feature_used (const arm_feature_set
*feature
)
1550 /* Do not support the use of MVE only instructions when in auto-detection or
1552 if (((feature
== &mve_ext
) || (feature
== &mve_fp_ext
))
1553 && ARM_CPU_IS_ANY (cpu_variant
))
1555 first_error (BAD_MVE_AUTO
);
1558 /* Ensure the option is valid on the current architecture. */
1559 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
1562 /* Add the appropriate architecture feature for the barrier option used.
1564 record_feature_use (feature
);
1569 /* Parse either a register or a scalar, with an optional type. Return the
1570 register number, and optionally fill in the actual type of the register
1571 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1572 type/index information in *TYPEINFO. */
1575 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1576 enum arm_reg_type
*rtype
,
1577 struct neon_typed_alias
*typeinfo
)
1580 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1581 struct neon_typed_alias atype
;
1582 struct neon_type_el parsetype
;
1586 atype
.eltype
.type
= NT_invtype
;
1587 atype
.eltype
.size
= -1;
1589 /* Try alternate syntax for some types of register. Note these are mutually
1590 exclusive with the Neon syntax extensions. */
1593 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1601 /* Undo polymorphism when a set of register types may be accepted. */
1602 if ((type
== REG_TYPE_NDQ
1603 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1604 || (type
== REG_TYPE_VFSD
1605 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1606 || (type
== REG_TYPE_NSDQ
1607 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1608 || reg
->type
== REG_TYPE_NQ
))
1609 || (type
== REG_TYPE_NSD
1610 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1611 || (type
== REG_TYPE_MMXWC
1612 && (reg
->type
== REG_TYPE_MMXWCG
)))
1613 type
= (enum arm_reg_type
) reg
->type
;
1615 if (type
== REG_TYPE_MQ
)
1617 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1620 if (!reg
|| reg
->type
!= REG_TYPE_NQ
)
1623 if (reg
->number
> 14 && !mark_feature_used (&fpu_vfp_ext_d32
))
1625 first_error (_("expected MVE register [q0..q7]"));
1630 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
1631 && (type
== REG_TYPE_NQ
))
1635 if (type
!= reg
->type
)
1641 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1643 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1645 first_error (_("can't redefine type for operand"));
1648 atype
.defined
|= NTA_HASTYPE
;
1649 atype
.eltype
= parsetype
;
1652 if (skip_past_char (&str
, '[') == SUCCESS
)
1654 if (type
!= REG_TYPE_VFD
1655 && !(type
== REG_TYPE_VFS
1656 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1658 first_error (_("only D registers may be indexed"));
1662 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1664 first_error (_("can't change index for operand"));
1668 atype
.defined
|= NTA_HASINDEX
;
1670 if (skip_past_char (&str
, ']') == SUCCESS
)
1671 atype
.index
= NEON_ALL_LANES
;
1676 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1678 if (exp
.X_op
!= O_constant
)
1680 first_error (_("constant expression required"));
1684 if (skip_past_char (&str
, ']') == FAIL
)
1687 atype
.index
= exp
.X_add_number
;
1702 /* Like arm_reg_parse, but also allow the following extra features:
1703 - If RTYPE is non-zero, return the (possibly restricted) type of the
1704 register (e.g. Neon double or quad reg when either has been requested).
1705 - If this is a Neon vector type with additional type information, fill
1706 in the struct pointed to by VECTYPE (if non-NULL).
1707 This function will fault on encountering a scalar. */
1710 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1711 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1713 struct neon_typed_alias atype
;
1715 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1720 /* Do not allow regname(... to parse as a register. */
1724 /* Do not allow a scalar (reg+index) to parse as a register. */
1725 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1727 first_error (_("register operand expected, but got scalar"));
1732 *vectype
= atype
.eltype
;
1739 #define NEON_SCALAR_REG(X) ((X) >> 4)
1740 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1742 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1743 have enough information to be able to do a good job bounds-checking. So, we
1744 just do easy checks here, and do further checks later. */
1747 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1751 struct neon_typed_alias atype
;
1752 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1755 reg_type
= REG_TYPE_VFS
;
1757 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1759 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1762 if (atype
.index
== NEON_ALL_LANES
)
1764 first_error (_("scalar must have an index"));
1767 else if (atype
.index
>= 64 / elsize
)
1769 first_error (_("scalar index out of range"));
1774 *type
= atype
.eltype
;
1778 return reg
* 16 + atype
.index
;
1781 /* Types of registers in a list. */
1794 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1797 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1803 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1805 /* We come back here if we get ranges concatenated by '+' or '|'. */
1808 skip_whitespace (str
);
1821 const char apsr_str
[] = "apsr";
1822 int apsr_str_len
= strlen (apsr_str
);
1824 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1825 if (etype
== REGLIST_CLRM
)
1827 if (reg
== REG_SP
|| reg
== REG_PC
)
1829 else if (reg
== FAIL
1830 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1831 && !ISALPHA (*(str
+ apsr_str_len
)))
1834 str
+= apsr_str_len
;
1839 first_error (_("r0-r12, lr or APSR expected"));
1843 else /* etype == REGLIST_RN. */
1847 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1858 first_error (_("bad range in register list"));
1862 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1864 if (range
& (1 << i
))
1866 (_("Warning: duplicated register (r%d) in register list"),
1874 if (range
& (1 << reg
))
1875 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1877 else if (reg
<= cur_reg
)
1878 as_tsktsk (_("Warning: register range not in ascending order"));
1883 while (skip_past_comma (&str
) != FAIL
1884 || (in_range
= 1, *str
++ == '-'));
1887 if (skip_past_char (&str
, '}') == FAIL
)
1889 first_error (_("missing `}'"));
1893 else if (etype
== REGLIST_RN
)
1897 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1900 if (exp
.X_op
== O_constant
)
1902 if (exp
.X_add_number
1903 != (exp
.X_add_number
& 0x0000ffff))
1905 inst
.error
= _("invalid register mask");
1909 if ((range
& exp
.X_add_number
) != 0)
1911 int regno
= range
& exp
.X_add_number
;
1914 regno
= (1 << regno
) - 1;
1916 (_("Warning: duplicated register (r%d) in register list"),
1920 range
|= exp
.X_add_number
;
1924 if (inst
.relocs
[0].type
!= 0)
1926 inst
.error
= _("expression too complex");
1930 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1931 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1932 inst
.relocs
[0].pc_rel
= 0;
1936 if (*str
== '|' || *str
== '+')
1942 while (another_range
);
1948 /* Parse a VFP register list. If the string is invalid return FAIL.
1949 Otherwise return the number of registers, and set PBASE to the first
1950 register. Parses registers of type ETYPE.
1951 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1952 - Q registers can be used to specify pairs of D registers
1953 - { } can be omitted from around a singleton register list
1954 FIXME: This is not implemented, as it would require backtracking in
1957 This could be done (the meaning isn't really ambiguous), but doesn't
1958 fit in well with the current parsing framework.
1959 - 32 D registers may be used (also true for VFPv3).
1960 FIXME: Types are ignored in these register lists, which is probably a
1964 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1965 bfd_boolean
*partial_match
)
1970 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1974 unsigned long mask
= 0;
1976 bfd_boolean vpr_seen
= FALSE
;
1977 bfd_boolean expect_vpr
=
1978 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1980 if (skip_past_char (&str
, '{') == FAIL
)
1982 inst
.error
= _("expecting {");
1989 case REGLIST_VFP_S_VPR
:
1990 regtype
= REG_TYPE_VFS
;
1995 case REGLIST_VFP_D_VPR
:
1996 regtype
= REG_TYPE_VFD
;
1999 case REGLIST_NEON_D
:
2000 regtype
= REG_TYPE_NDQ
;
2007 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
2009 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2010 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
2014 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
2017 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
2024 base_reg
= max_regs
;
2025 *partial_match
= FALSE
;
2029 int setmask
= 1, addregs
= 1;
2030 const char vpr_str
[] = "vpr";
2031 int vpr_str_len
= strlen (vpr_str
);
2033 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
2037 if (new_base
== FAIL
2038 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
2039 && !ISALPHA (*(str
+ vpr_str_len
))
2045 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
2049 first_error (_("VPR expected last"));
2052 else if (new_base
== FAIL
)
2054 if (regtype
== REG_TYPE_VFS
)
2055 first_error (_("VFP single precision register or VPR "
2057 else /* regtype == REG_TYPE_VFD. */
2058 first_error (_("VFP/Neon double precision register or VPR "
2063 else if (new_base
== FAIL
)
2065 first_error (_(reg_expected_msgs
[regtype
]));
2069 *partial_match
= TRUE
;
2073 if (new_base
>= max_regs
)
2075 first_error (_("register out of range in list"));
2079 /* Note: a value of 2 * n is returned for the register Q<n>. */
2080 if (regtype
== REG_TYPE_NQ
)
2086 if (new_base
< base_reg
)
2087 base_reg
= new_base
;
2089 if (mask
& (setmask
<< new_base
))
2091 first_error (_("invalid register list"));
2095 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2097 as_tsktsk (_("register list not in ascending order"));
2101 mask
|= setmask
<< new_base
;
2104 if (*str
== '-') /* We have the start of a range expression */
2110 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2113 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2117 if (high_range
>= max_regs
)
2119 first_error (_("register out of range in list"));
2123 if (regtype
== REG_TYPE_NQ
)
2124 high_range
= high_range
+ 1;
2126 if (high_range
<= new_base
)
2128 inst
.error
= _("register range not in ascending order");
2132 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2134 if (mask
& (setmask
<< new_base
))
2136 inst
.error
= _("invalid register list");
2140 mask
|= setmask
<< new_base
;
2145 while (skip_past_comma (&str
) != FAIL
);
2149 /* Sanity check -- should have raised a parse error above. */
2150 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2155 if (expect_vpr
&& !vpr_seen
)
2157 first_error (_("VPR expected last"));
2161 /* Final test -- the registers must be consecutive. */
2163 for (i
= 0; i
< count
; i
++)
2165 if ((mask
& (1u << i
)) == 0)
2167 inst
.error
= _("non-contiguous register range");
2177 /* True if two alias types are the same. */
2180 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2188 if (a
->defined
!= b
->defined
)
2191 if ((a
->defined
& NTA_HASTYPE
) != 0
2192 && (a
->eltype
.type
!= b
->eltype
.type
2193 || a
->eltype
.size
!= b
->eltype
.size
))
2196 if ((a
->defined
& NTA_HASINDEX
) != 0
2197 && (a
->index
!= b
->index
))
2203 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2204 The base register is put in *PBASE.
2205 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2207 The register stride (minus one) is put in bit 4 of the return value.
2208 Bits [6:5] encode the list length (minus one).
2209 The type of the list elements is put in *ELTYPE, if non-NULL. */
2211 #define NEON_LANE(X) ((X) & 0xf)
2212 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2213 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2216 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2218 struct neon_type_el
*eltype
)
2225 int leading_brace
= 0;
2226 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2227 const char *const incr_error
= mve
? _("register stride must be 1") :
2228 _("register stride must be 1 or 2");
2229 const char *const type_error
= _("mismatched element/structure types in list");
2230 struct neon_typed_alias firsttype
;
2231 firsttype
.defined
= 0;
2232 firsttype
.eltype
.type
= NT_invtype
;
2233 firsttype
.eltype
.size
= -1;
2234 firsttype
.index
= -1;
2236 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2241 struct neon_typed_alias atype
;
2243 rtype
= REG_TYPE_MQ
;
2244 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2248 first_error (_(reg_expected_msgs
[rtype
]));
2255 if (rtype
== REG_TYPE_NQ
)
2261 else if (reg_incr
== -1)
2263 reg_incr
= getreg
- base_reg
;
2264 if (reg_incr
< 1 || reg_incr
> 2)
2266 first_error (_(incr_error
));
2270 else if (getreg
!= base_reg
+ reg_incr
* count
)
2272 first_error (_(incr_error
));
2276 if (! neon_alias_types_same (&atype
, &firsttype
))
2278 first_error (_(type_error
));
2282 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2286 struct neon_typed_alias htype
;
2287 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2289 lane
= NEON_INTERLEAVE_LANES
;
2290 else if (lane
!= NEON_INTERLEAVE_LANES
)
2292 first_error (_(type_error
));
2297 else if (reg_incr
!= 1)
2299 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2303 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2306 first_error (_(reg_expected_msgs
[rtype
]));
2309 if (! neon_alias_types_same (&htype
, &firsttype
))
2311 first_error (_(type_error
));
2314 count
+= hireg
+ dregs
- getreg
;
2318 /* If we're using Q registers, we can't use [] or [n] syntax. */
2319 if (rtype
== REG_TYPE_NQ
)
2325 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2329 else if (lane
!= atype
.index
)
2331 first_error (_(type_error
));
2335 else if (lane
== -1)
2336 lane
= NEON_INTERLEAVE_LANES
;
2337 else if (lane
!= NEON_INTERLEAVE_LANES
)
2339 first_error (_(type_error
));
2344 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2346 /* No lane set by [x]. We must be interleaving structures. */
2348 lane
= NEON_INTERLEAVE_LANES
;
2351 if (lane
== -1 || base_reg
== -1 || count
< 1 || (!mve
&& count
> 4)
2352 || (count
> 1 && reg_incr
== -1))
2354 first_error (_("error parsing element/structure list"));
2358 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2360 first_error (_("expected }"));
2368 *eltype
= firsttype
.eltype
;
2373 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2376 /* Parse an explicit relocation suffix on an expression. This is
2377 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2378 arm_reloc_hsh contains no entries, so this function can only
2379 succeed if there is no () after the word. Returns -1 on error,
2380 BFD_RELOC_UNUSED if there wasn't any suffix. */
2383 parse_reloc (char **str
)
2385 struct reloc_entry
*r
;
2389 return BFD_RELOC_UNUSED
;
2394 while (*q
&& *q
!= ')' && *q
!= ',')
2399 if ((r
= (struct reloc_entry
*)
2400 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2407 /* Directives: register aliases. */
2409 static struct reg_entry
*
2410 insert_reg_alias (char *str
, unsigned number
, int type
)
2412 struct reg_entry
*new_reg
;
2415 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2417 if (new_reg
->builtin
)
2418 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2420 /* Only warn about a redefinition if it's not defined as the
2422 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2423 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2428 name
= xstrdup (str
);
2429 new_reg
= XNEW (struct reg_entry
);
2431 new_reg
->name
= name
;
2432 new_reg
->number
= number
;
2433 new_reg
->type
= type
;
2434 new_reg
->builtin
= FALSE
;
2435 new_reg
->neon
= NULL
;
2437 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2444 insert_neon_reg_alias (char *str
, int number
, int type
,
2445 struct neon_typed_alias
*atype
)
2447 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2451 first_error (_("attempt to redefine typed alias"));
2457 reg
->neon
= XNEW (struct neon_typed_alias
);
2458 *reg
->neon
= *atype
;
2462 /* Look for the .req directive. This is of the form:
2464 new_register_name .req existing_register_name
2466 If we find one, or if it looks sufficiently like one that we want to
2467 handle any error here, return TRUE. Otherwise return FALSE. */
2470 create_register_alias (char * newname
, char *p
)
2472 struct reg_entry
*old
;
2473 char *oldname
, *nbuf
;
2476 /* The input scrubber ensures that whitespace after the mnemonic is
2477 collapsed to single spaces. */
2479 if (strncmp (oldname
, " .req ", 6) != 0)
2483 if (*oldname
== '\0')
2486 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2489 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2493 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2494 the desired alias name, and p points to its end. If not, then
2495 the desired alias name is in the global original_case_string. */
2496 #ifdef TC_CASE_SENSITIVE
2499 newname
= original_case_string
;
2500 nlen
= strlen (newname
);
2503 nbuf
= xmemdup0 (newname
, nlen
);
2505 /* Create aliases under the new name as stated; an all-lowercase
2506 version of the new name; and an all-uppercase version of the new
2508 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2510 for (p
= nbuf
; *p
; p
++)
2513 if (strncmp (nbuf
, newname
, nlen
))
2515 /* If this attempt to create an additional alias fails, do not bother
2516 trying to create the all-lower case alias. We will fail and issue
2517 a second, duplicate error message. This situation arises when the
2518 programmer does something like:
2521 The second .req creates the "Foo" alias but then fails to create
2522 the artificial FOO alias because it has already been created by the
2524 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2531 for (p
= nbuf
; *p
; p
++)
2534 if (strncmp (nbuf
, newname
, nlen
))
2535 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2542 /* Create a Neon typed/indexed register alias using directives, e.g.:
2547 These typed registers can be used instead of the types specified after the
2548 Neon mnemonic, so long as all operands given have types. Types can also be
2549 specified directly, e.g.:
2550 vadd d0.s32, d1.s32, d2.s32 */
2553 create_neon_reg_alias (char *newname
, char *p
)
2555 enum arm_reg_type basetype
;
2556 struct reg_entry
*basereg
;
2557 struct reg_entry mybasereg
;
2558 struct neon_type ntype
;
2559 struct neon_typed_alias typeinfo
;
2560 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2563 typeinfo
.defined
= 0;
2564 typeinfo
.eltype
.type
= NT_invtype
;
2565 typeinfo
.eltype
.size
= -1;
2566 typeinfo
.index
= -1;
2570 if (strncmp (p
, " .dn ", 5) == 0)
2571 basetype
= REG_TYPE_VFD
;
2572 else if (strncmp (p
, " .qn ", 5) == 0)
2573 basetype
= REG_TYPE_NQ
;
2582 basereg
= arm_reg_parse_multi (&p
);
2584 if (basereg
&& basereg
->type
!= basetype
)
2586 as_bad (_("bad type for register"));
2590 if (basereg
== NULL
)
2593 /* Try parsing as an integer. */
2594 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2595 if (exp
.X_op
!= O_constant
)
2597 as_bad (_("expression must be constant"));
2600 basereg
= &mybasereg
;
2601 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2607 typeinfo
= *basereg
->neon
;
2609 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2611 /* We got a type. */
2612 if (typeinfo
.defined
& NTA_HASTYPE
)
2614 as_bad (_("can't redefine the type of a register alias"));
2618 typeinfo
.defined
|= NTA_HASTYPE
;
2619 if (ntype
.elems
!= 1)
2621 as_bad (_("you must specify a single type only"));
2624 typeinfo
.eltype
= ntype
.el
[0];
2627 if (skip_past_char (&p
, '[') == SUCCESS
)
2630 /* We got a scalar index. */
2632 if (typeinfo
.defined
& NTA_HASINDEX
)
2634 as_bad (_("can't redefine the index of a scalar alias"));
2638 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2640 if (exp
.X_op
!= O_constant
)
2642 as_bad (_("scalar index must be constant"));
2646 typeinfo
.defined
|= NTA_HASINDEX
;
2647 typeinfo
.index
= exp
.X_add_number
;
2649 if (skip_past_char (&p
, ']') == FAIL
)
2651 as_bad (_("expecting ]"));
2656 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2657 the desired alias name, and p points to its end. If not, then
2658 the desired alias name is in the global original_case_string. */
2659 #ifdef TC_CASE_SENSITIVE
2660 namelen
= nameend
- newname
;
2662 newname
= original_case_string
;
2663 namelen
= strlen (newname
);
2666 namebuf
= xmemdup0 (newname
, namelen
);
2668 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2669 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2671 /* Insert name in all uppercase. */
2672 for (p
= namebuf
; *p
; p
++)
2675 if (strncmp (namebuf
, newname
, namelen
))
2676 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2677 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2679 /* Insert name in all lowercase. */
2680 for (p
= namebuf
; *p
; p
++)
2683 if (strncmp (namebuf
, newname
, namelen
))
2684 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2685 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2691 /* Should never be called, as .req goes between the alias and the
2692 register name, not at the beginning of the line. */
2695 s_req (int a ATTRIBUTE_UNUSED
)
2697 as_bad (_("invalid syntax for .req directive"));
2701 s_dn (int a ATTRIBUTE_UNUSED
)
2703 as_bad (_("invalid syntax for .dn directive"));
2707 s_qn (int a ATTRIBUTE_UNUSED
)
2709 as_bad (_("invalid syntax for .qn directive"));
2712 /* The .unreq directive deletes an alias which was previously defined
2713 by .req. For example:
2719 s_unreq (int a ATTRIBUTE_UNUSED
)
2724 name
= input_line_pointer
;
2726 while (*input_line_pointer
!= 0
2727 && *input_line_pointer
!= ' '
2728 && *input_line_pointer
!= '\n')
2729 ++input_line_pointer
;
2731 saved_char
= *input_line_pointer
;
2732 *input_line_pointer
= 0;
2735 as_bad (_("invalid syntax for .unreq directive"));
2738 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2742 as_bad (_("unknown register alias '%s'"), name
);
2743 else if (reg
->builtin
)
2744 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2751 hash_delete (arm_reg_hsh
, name
, FALSE
);
2752 free ((char *) reg
->name
);
2757 /* Also locate the all upper case and all lower case versions.
2758 Do not complain if we cannot find one or the other as it
2759 was probably deleted above. */
2761 nbuf
= strdup (name
);
2762 for (p
= nbuf
; *p
; p
++)
2764 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2767 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2768 free ((char *) reg
->name
);
2774 for (p
= nbuf
; *p
; p
++)
2776 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2779 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2780 free ((char *) reg
->name
);
2790 *input_line_pointer
= saved_char
;
2791 demand_empty_rest_of_line ();
2794 /* Directives: Instruction set selection. */
2797 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2798 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2799 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2800 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2802 /* Create a new mapping symbol for the transition to STATE. */
2805 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2808 const char * symname
;
2815 type
= BSF_NO_FLAGS
;
2819 type
= BSF_NO_FLAGS
;
2823 type
= BSF_NO_FLAGS
;
2829 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2830 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2835 THUMB_SET_FUNC (symbolP
, 0);
2836 ARM_SET_THUMB (symbolP
, 0);
2837 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2841 THUMB_SET_FUNC (symbolP
, 1);
2842 ARM_SET_THUMB (symbolP
, 1);
2843 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2851 /* Save the mapping symbols for future reference. Also check that
2852 we do not place two mapping symbols at the same offset within a
2853 frag. We'll handle overlap between frags in
2854 check_mapping_symbols.
2856 If .fill or other data filling directive generates zero sized data,
2857 the mapping symbol for the following code will have the same value
2858 as the one generated for the data filling directive. In this case,
2859 we replace the old symbol with the new one at the same address. */
2862 if (frag
->tc_frag_data
.first_map
!= NULL
)
2864 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2865 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2867 frag
->tc_frag_data
.first_map
= symbolP
;
2869 if (frag
->tc_frag_data
.last_map
!= NULL
)
2871 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2872 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2873 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2875 frag
->tc_frag_data
.last_map
= symbolP
;
2878 /* We must sometimes convert a region marked as code to data during
2879 code alignment, if an odd number of bytes have to be padded. The
2880 code mapping symbol is pushed to an aligned address. */
2883 insert_data_mapping_symbol (enum mstate state
,
2884 valueT value
, fragS
*frag
, offsetT bytes
)
2886 /* If there was already a mapping symbol, remove it. */
2887 if (frag
->tc_frag_data
.last_map
!= NULL
2888 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2890 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2894 know (frag
->tc_frag_data
.first_map
== symp
);
2895 frag
->tc_frag_data
.first_map
= NULL
;
2897 frag
->tc_frag_data
.last_map
= NULL
;
2898 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2901 make_mapping_symbol (MAP_DATA
, value
, frag
);
2902 make_mapping_symbol (state
, value
+ bytes
, frag
);
2905 static void mapping_state_2 (enum mstate state
, int max_chars
);
2907 /* Set the mapping state to STATE. Only call this when about to
2908 emit some STATE bytes to the file. */
2910 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2912 mapping_state (enum mstate state
)
2914 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2916 if (mapstate
== state
)
2917 /* The mapping symbol has already been emitted.
2918 There is nothing else to do. */
2921 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2923 All ARM instructions require 4-byte alignment.
2924 (Almost) all Thumb instructions require 2-byte alignment.
2926 When emitting instructions into any section, mark the section
2929 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2930 but themselves require 2-byte alignment; this applies to some
2931 PC- relative forms. However, these cases will involve implicit
2932 literal pool generation or an explicit .align >=2, both of
2933 which will cause the section to me marked with sufficient
2934 alignment. Thus, we don't handle those cases here. */
2935 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2937 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2938 /* This case will be evaluated later. */
2941 mapping_state_2 (state
, 0);
2944 /* Same as mapping_state, but MAX_CHARS bytes have already been
2945 allocated. Put the mapping symbol that far back. */
2948 mapping_state_2 (enum mstate state
, int max_chars
)
2950 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2952 if (!SEG_NORMAL (now_seg
))
2955 if (mapstate
== state
)
2956 /* The mapping symbol has already been emitted.
2957 There is nothing else to do. */
2960 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2961 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2963 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2964 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2967 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2970 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2971 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2975 #define mapping_state(x) ((void)0)
2976 #define mapping_state_2(x, y) ((void)0)
/* Find the real, Thumb encoded start of a Thumb function.  */

#ifdef OBJ_COFF
static symbolS *
find_real_start (symbolS * symbolP)
{
  char *       real_start;
  const char * name = S_GET_NAME (symbolP);
  symbolS *    new_target;

  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
#define STUB_NAME ".real_start_of"

  if (name == NULL)
    abort ();

  /* The compiler may generate BL instructions to local labels because
     it needs to perform a branch to a far away location. These labels
     do not have a corresponding ".real_start_of" label.  We check
     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
     the ".real_start_of" convention for nonlocal branches.  */
  if (S_IS_LOCAL (symbolP) || name[0] == '.')
    return symbolP;

  real_start = concat (STUB_NAME, name, NULL);
  new_target = symbol_find (real_start);
  free (real_start);

  if (new_target == NULL)
    {
      as_warn (_("Failed to find real start of function: %s\n"), name);
      new_target = symbolP;
    }

  return new_target;
}
#endif
3018 opcode_select (int width
)
3025 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
3026 as_bad (_("selected processor does not support THUMB opcodes"));
3029 /* No need to force the alignment, since we will have been
3030 coming from ARM mode, which is word-aligned. */
3031 record_alignment (now_seg
, 1);
3038 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
3039 as_bad (_("selected processor does not support ARM opcodes"));
3044 frag_align (2, 0, 0);
3046 record_alignment (now_seg
, 1);
3051 as_bad (_("invalid instruction size selected (%d)"), width
);
3056 s_arm (int ignore ATTRIBUTE_UNUSED
)
3059 demand_empty_rest_of_line ();
3063 s_thumb (int ignore ATTRIBUTE_UNUSED
)
3066 demand_empty_rest_of_line ();
3070 s_code (int unused ATTRIBUTE_UNUSED
)
3074 temp
= get_absolute_expression ();
3079 opcode_select (temp
);
3083 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
3088 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
3090 /* If we are not already in thumb mode go into it, EVEN if
3091 the target processor does not support thumb instructions.
3092 This is used by gcc/config/arm/lib1funcs.asm for example
3093 to compile interworking support functions even if the
3094 target processor should not support interworking. */
3098 record_alignment (now_seg
, 1);
3101 demand_empty_rest_of_line ();
3105 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3109 /* The following label is the name/address of the start of a Thumb function.
3110 We need to know this for the interworking support. */
3111 label_is_thumb_function_name
= TRUE
;
3114 /* Perform a .set directive, but also mark the alias as
3115 being a thumb function. */
3118 s_thumb_set (int equiv
)
3120 /* XXX the following is a duplicate of the code for s_set() in read.c
3121 We cannot just call that code as we need to get at the symbol that
3128 /* Especial apologies for the random logic:
3129 This just grew, and could be parsed much more simply!
3131 delim
= get_symbol_name (& name
);
3132 end_name
= input_line_pointer
;
3133 (void) restore_line_pointer (delim
);
3135 if (*input_line_pointer
!= ',')
3138 as_bad (_("expected comma after name \"%s\""), name
);
3140 ignore_rest_of_line ();
3144 input_line_pointer
++;
3147 if (name
[0] == '.' && name
[1] == '\0')
3149 /* XXX - this should not happen to .thumb_set. */
3153 if ((symbolP
= symbol_find (name
)) == NULL
3154 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3157 /* When doing symbol listings, play games with dummy fragments living
3158 outside the normal fragment chain to record the file and line info
3160 if (listing
& LISTING_SYMBOLS
)
3162 extern struct list_info_struct
* listing_tail
;
3163 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3165 memset (dummy_frag
, 0, sizeof (fragS
));
3166 dummy_frag
->fr_type
= rs_fill
;
3167 dummy_frag
->line
= listing_tail
;
3168 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3169 dummy_frag
->fr_symbol
= symbolP
;
3173 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3176 /* "set" symbols are local unless otherwise specified. */
3177 SF_SET_LOCAL (symbolP
);
3178 #endif /* OBJ_COFF */
3179 } /* Make a new symbol. */
3181 symbol_table_insert (symbolP
);
3186 && S_IS_DEFINED (symbolP
)
3187 && S_GET_SEGMENT (symbolP
) != reg_section
)
3188 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3190 pseudo_set (symbolP
);
3192 demand_empty_rest_of_line ();
3194 /* XXX Now we come to the Thumb specific bit of code. */
3196 THUMB_SET_FUNC (symbolP
, 1);
3197 ARM_SET_THUMB (symbolP
, 1);
3198 #if defined OBJ_ELF || defined OBJ_COFF
3199 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3203 /* Directives: Mode selection. */
3205 /* .syntax [unified|divided] - choose the new unified syntax
3206 (same for Arm and Thumb encoding, modulo slight differences in what
3207 can be represented) or the old divergent syntax for each mode. */
3209 s_syntax (int unused ATTRIBUTE_UNUSED
)
3213 delim
= get_symbol_name (& name
);
3215 if (!strcasecmp (name
, "unified"))
3216 unified_syntax
= TRUE
;
3217 else if (!strcasecmp (name
, "divided"))
3218 unified_syntax
= FALSE
;
3221 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3224 (void) restore_line_pointer (delim
);
3225 demand_empty_rest_of_line ();
3228 /* Directives: sectioning and alignment. */
3231 s_bss (int ignore ATTRIBUTE_UNUSED
)
3233 /* We don't support putting frags in the BSS segment, we fake it by
3234 marking in_bss, then looking at s_skip for clues. */
3235 subseg_set (bss_section
, 0);
3236 demand_empty_rest_of_line ();
3238 #ifdef md_elf_section_change_hook
3239 md_elf_section_change_hook ();
3244 s_even (int ignore ATTRIBUTE_UNUSED
)
3246 /* Never make frag if expect extra pass. */
3248 frag_align (1, 0, 0);
3250 record_alignment (now_seg
, 1);
3252 demand_empty_rest_of_line ();
3255 /* Directives: CodeComposer Studio. */
3257 /* .ref (for CodeComposer Studio syntax only). */
3259 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3261 if (codecomposer_syntax
)
3262 ignore_rest_of_line ();
3264 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3267 /* If name is not NULL, then it is used for marking the beginning of a
3268 function, whereas if it is NULL then it means the function end. */
3270 asmfunc_debug (const char * name
)
3272 static const char * last_name
= NULL
;
3276 gas_assert (last_name
== NULL
);
3279 if (debug_type
== DEBUG_STABS
)
3280 stabs_generate_asm_func (name
, name
);
3284 gas_assert (last_name
!= NULL
);
3286 if (debug_type
== DEBUG_STABS
)
3287 stabs_generate_asm_endfunc (last_name
, last_name
);
3294 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3296 if (codecomposer_syntax
)
3298 switch (asmfunc_state
)
3300 case OUTSIDE_ASMFUNC
:
3301 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3304 case WAITING_ASMFUNC_NAME
:
3305 as_bad (_(".asmfunc repeated."));
3308 case WAITING_ENDASMFUNC
:
3309 as_bad (_(".asmfunc without function."));
3312 demand_empty_rest_of_line ();
3315 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3319 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3321 if (codecomposer_syntax
)
3323 switch (asmfunc_state
)
3325 case OUTSIDE_ASMFUNC
:
3326 as_bad (_(".endasmfunc without a .asmfunc."));
3329 case WAITING_ASMFUNC_NAME
:
3330 as_bad (_(".endasmfunc without function."));
3333 case WAITING_ENDASMFUNC
:
3334 asmfunc_state
= OUTSIDE_ASMFUNC
;
3335 asmfunc_debug (NULL
);
3338 demand_empty_rest_of_line ();
3341 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3345 s_ccs_def (int name
)
3347 if (codecomposer_syntax
)
3350 as_bad (_(".def pseudo-op only available with -mccs flag."));
3353 /* Directives: Literal pools. */
3355 static literal_pool
*
3356 find_literal_pool (void)
3358 literal_pool
* pool
;
3360 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3362 if (pool
->section
== now_seg
3363 && pool
->sub_section
== now_subseg
)
3370 static literal_pool
*
3371 find_or_make_literal_pool (void)
3373 /* Next literal pool ID number. */
3374 static unsigned int latest_pool_num
= 1;
3375 literal_pool
* pool
;
3377 pool
= find_literal_pool ();
3381 /* Create a new pool. */
3382 pool
= XNEW (literal_pool
);
3386 pool
->next_free_entry
= 0;
3387 pool
->section
= now_seg
;
3388 pool
->sub_section
= now_subseg
;
3389 pool
->next
= list_of_pools
;
3390 pool
->symbol
= NULL
;
3391 pool
->alignment
= 2;
3393 /* Add it to the list. */
3394 list_of_pools
= pool
;
3397 /* New pools, and emptied pools, will have a NULL symbol. */
3398 if (pool
->symbol
== NULL
)
3400 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3401 (valueT
) 0, &zero_address_frag
);
3402 pool
->id
= latest_pool_num
++;
3409 /* Add the literal in the global 'inst'
3410 structure to the relevant literal pool. */
3413 add_to_lit_pool (unsigned int nbytes
)
3415 #define PADDING_SLOT 0x1
3416 #define LIT_ENTRY_SIZE_MASK 0xFF
3417 literal_pool
* pool
;
3418 unsigned int entry
, pool_size
= 0;
3419 bfd_boolean padding_slot_p
= FALSE
;
3425 imm1
= inst
.operands
[1].imm
;
3426 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3427 : inst
.relocs
[0].exp
.X_unsigned
? 0
3428 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3429 if (target_big_endian
)
3432 imm2
= inst
.operands
[1].imm
;
3436 pool
= find_or_make_literal_pool ();
3438 /* Check if this literal value is already in the pool. */
3439 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3443 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3444 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3445 && (pool
->literals
[entry
].X_add_number
3446 == inst
.relocs
[0].exp
.X_add_number
)
3447 && (pool
->literals
[entry
].X_md
== nbytes
)
3448 && (pool
->literals
[entry
].X_unsigned
3449 == inst
.relocs
[0].exp
.X_unsigned
))
3452 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3453 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3454 && (pool
->literals
[entry
].X_add_number
3455 == inst
.relocs
[0].exp
.X_add_number
)
3456 && (pool
->literals
[entry
].X_add_symbol
3457 == inst
.relocs
[0].exp
.X_add_symbol
)
3458 && (pool
->literals
[entry
].X_op_symbol
3459 == inst
.relocs
[0].exp
.X_op_symbol
)
3460 && (pool
->literals
[entry
].X_md
== nbytes
))
3463 else if ((nbytes
== 8)
3464 && !(pool_size
& 0x7)
3465 && ((entry
+ 1) != pool
->next_free_entry
)
3466 && (pool
->literals
[entry
].X_op
== O_constant
)
3467 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3468 && (pool
->literals
[entry
].X_unsigned
3469 == inst
.relocs
[0].exp
.X_unsigned
)
3470 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3471 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3472 && (pool
->literals
[entry
+ 1].X_unsigned
3473 == inst
.relocs
[0].exp
.X_unsigned
))
3476 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3477 if (padding_slot_p
&& (nbytes
== 4))
3483 /* Do we need to create a new entry? */
3484 if (entry
== pool
->next_free_entry
)
3486 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3488 inst
.error
= _("literal pool overflow");
3494 /* For 8-byte entries, we align to an 8-byte boundary,
3495 and split it into two 4-byte entries, because on 32-bit
3496 host, 8-byte constants are treated as big num, thus
3497 saved in "generic_bignum" which will be overwritten
3498 by later assignments.
3500 We also need to make sure there is enough space for
3503 We also check to make sure the literal operand is a
3505 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3506 || inst
.relocs
[0].exp
.X_op
== O_big
))
3508 inst
.error
= _("invalid type for literal pool");
3511 else if (pool_size
& 0x7)
3513 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3515 inst
.error
= _("literal pool overflow");
3519 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3520 pool
->literals
[entry
].X_op
= O_constant
;
3521 pool
->literals
[entry
].X_add_number
= 0;
3522 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3523 pool
->next_free_entry
+= 1;
3526 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3528 inst
.error
= _("literal pool overflow");
3532 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3533 pool
->literals
[entry
].X_op
= O_constant
;
3534 pool
->literals
[entry
].X_add_number
= imm1
;
3535 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3536 pool
->literals
[entry
++].X_md
= 4;
3537 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3538 pool
->literals
[entry
].X_op
= O_constant
;
3539 pool
->literals
[entry
].X_add_number
= imm2
;
3540 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3541 pool
->literals
[entry
].X_md
= 4;
3542 pool
->alignment
= 3;
3543 pool
->next_free_entry
+= 1;
3547 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3548 pool
->literals
[entry
].X_md
= 4;
3552 /* PR ld/12974: Record the location of the first source line to reference
3553 this entry in the literal pool. If it turns out during linking that the
3554 symbol does not exist we will be able to give an accurate line number for
3555 the (first use of the) missing reference. */
3556 if (debug_type
== DEBUG_DWARF2
)
3557 dwarf2_where (pool
->locs
+ entry
);
3559 pool
->next_free_entry
+= 1;
3561 else if (padding_slot_p
)
3563 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3564 pool
->literals
[entry
].X_md
= nbytes
;
3567 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3568 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3569 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3575 tc_start_label_without_colon (void)
3577 bfd_boolean ret
= TRUE
;
3579 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3581 const char *label
= input_line_pointer
;
3583 while (!is_end_of_line
[(int) label
[-1]])
3588 as_bad (_("Invalid label '%s'"), label
);
3592 asmfunc_debug (label
);
3594 asmfunc_state
= WAITING_ENDASMFUNC
;
3600 /* Can't use symbol_new here, so have to create a symbol and then at
3601 a later date assign it a value. That's what these functions do. */
3604 symbol_locate (symbolS
* symbolP
,
3605 const char * name
, /* It is copied, the caller can modify. */
3606 segT segment
, /* Segment identifier (SEG_<something>). */
3607 valueT valu
, /* Symbol value. */
3608 fragS
* frag
) /* Associated fragment. */
3611 char * preserved_copy_of_name
;
3613 name_length
= strlen (name
) + 1; /* +1 for \0. */
3614 obstack_grow (¬es
, name
, name_length
);
3615 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3617 #ifdef tc_canonicalize_symbol_name
3618 preserved_copy_of_name
=
3619 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3622 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3624 S_SET_SEGMENT (symbolP
, segment
);
3625 S_SET_VALUE (symbolP
, valu
);
3626 symbol_clear_list_pointers (symbolP
);
3628 symbol_set_frag (symbolP
, frag
);
3630 /* Link to end of symbol chain. */
3632 extern int symbol_table_frozen
;
3634 if (symbol_table_frozen
)
3638 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3640 obj_symbol_new_hook (symbolP
);
3642 #ifdef tc_symbol_new_hook
3643 tc_symbol_new_hook (symbolP
);
3647 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3648 #endif /* DEBUG_SYMS */
3652 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3655 literal_pool
* pool
;
3658 pool
= find_literal_pool ();
3660 || pool
->symbol
== NULL
3661 || pool
->next_free_entry
== 0)
3664 /* Align pool as you have word accesses.
3665 Only make a frag if we have to. */
3667 frag_align (pool
->alignment
, 0, 0);
3669 record_alignment (now_seg
, 2);
3672 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3673 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3675 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3677 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3678 (valueT
) frag_now_fix (), frag_now
);
3679 symbol_table_insert (pool
->symbol
);
3681 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3683 #if defined OBJ_COFF || defined OBJ_ELF
3684 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3687 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3690 if (debug_type
== DEBUG_DWARF2
)
3691 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3693 /* First output the expression in the instruction to the pool. */
3694 emit_expr (&(pool
->literals
[entry
]),
3695 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3698 /* Mark the pool as empty. */
3699 pool
->next_free_entry
= 0;
3700 pool
->symbol
= NULL
;
3704 /* Forward declarations for functions below, in the MD interface
3706 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3707 static valueT
create_unwind_entry (int);
3708 static void start_unwind_section (const segT
, int);
3709 static void add_unwind_opcode (valueT
, int);
3710 static void flush_pending_unwind (void);
3712 /* Directives: Data. */
3715 s_arm_elf_cons (int nbytes
)
3719 #ifdef md_flush_pending_output
3720 md_flush_pending_output ();
3723 if (is_it_end_of_statement ())
3725 demand_empty_rest_of_line ();
3729 #ifdef md_cons_align
3730 md_cons_align (nbytes
);
3733 mapping_state (MAP_DATA
);
3737 char *base
= input_line_pointer
;
3741 if (exp
.X_op
!= O_symbol
)
3742 emit_expr (&exp
, (unsigned int) nbytes
);
3745 char *before_reloc
= input_line_pointer
;
3746 reloc
= parse_reloc (&input_line_pointer
);
3749 as_bad (_("unrecognized relocation suffix"));
3750 ignore_rest_of_line ();
3753 else if (reloc
== BFD_RELOC_UNUSED
)
3754 emit_expr (&exp
, (unsigned int) nbytes
);
3757 reloc_howto_type
*howto
= (reloc_howto_type
*)
3758 bfd_reloc_type_lookup (stdoutput
,
3759 (bfd_reloc_code_real_type
) reloc
);
3760 int size
= bfd_get_reloc_size (howto
);
3762 if (reloc
== BFD_RELOC_ARM_PLT32
)
3764 as_bad (_("(plt) is only valid on branch targets"));
3765 reloc
= BFD_RELOC_UNUSED
;
3770 as_bad (ngettext ("%s relocations do not fit in %d byte",
3771 "%s relocations do not fit in %d bytes",
3773 howto
->name
, nbytes
);
3776 /* We've parsed an expression stopping at O_symbol.
3777 But there may be more expression left now that we
3778 have parsed the relocation marker. Parse it again.
3779 XXX Surely there is a cleaner way to do this. */
3780 char *p
= input_line_pointer
;
3782 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3784 memcpy (save_buf
, base
, input_line_pointer
- base
);
3785 memmove (base
+ (input_line_pointer
- before_reloc
),
3786 base
, before_reloc
- base
);
3788 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3790 memcpy (base
, save_buf
, p
- base
);
3792 offset
= nbytes
- size
;
3793 p
= frag_more (nbytes
);
3794 memset (p
, 0, nbytes
);
3795 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3796 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3802 while (*input_line_pointer
++ == ',');
3804 /* Put terminator back into stream. */
3805 input_line_pointer
--;
3806 demand_empty_rest_of_line ();
3809 /* Emit an expression containing a 32-bit thumb instruction.
3810 Implementation based on put_thumb32_insn. */
3813 emit_thumb32_expr (expressionS
* exp
)
3815 expressionS exp_high
= *exp
;
3817 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3818 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3819 exp
->X_add_number
&= 0xffff;
3820 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
/* Guess the instruction size based on the opcode.
   Returns 2 for a 16-bit Thumb opcode, 4 for a 32-bit one, and 0 when
   the size cannot be determined from the encoding.  */

static int
thumb_insn_size (int opcode)
{
  int size;

  if ((unsigned int) opcode < 0xe800u)
    size = 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    size = 4;
  else
    size = 0;

  return size;
}
3837 emit_insn (expressionS
*exp
, int nbytes
)
3841 if (exp
->X_op
== O_constant
)
3846 size
= thumb_insn_size (exp
->X_add_number
);
3850 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3852 as_bad (_(".inst.n operand too big. "\
3853 "Use .inst.w instead"));
3858 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
3859 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN
, 0);
3861 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3863 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3864 emit_thumb32_expr (exp
);
3866 emit_expr (exp
, (unsigned int) size
);
3868 it_fsm_post_encode ();
3872 as_bad (_("cannot determine Thumb instruction size. " \
3873 "Use .inst.n/.inst.w instead"));
3876 as_bad (_("constant expression required"));
3881 /* Like s_arm_elf_cons but do not use md_cons_align and
3882 set the mapping state to MAP_ARM/MAP_THUMB. */
3885 s_arm_elf_inst (int nbytes
)
3887 if (is_it_end_of_statement ())
3889 demand_empty_rest_of_line ();
3893 /* Calling mapping_state () here will not change ARM/THUMB,
3894 but will ensure not to be in DATA state. */
3897 mapping_state (MAP_THUMB
);
3902 as_bad (_("width suffixes are invalid in ARM mode"));
3903 ignore_rest_of_line ();
3909 mapping_state (MAP_ARM
);
3918 if (! emit_insn (& exp
, nbytes
))
3920 ignore_rest_of_line ();
3924 while (*input_line_pointer
++ == ',');
3926 /* Put terminator back into stream. */
3927 input_line_pointer
--;
3928 demand_empty_rest_of_line ();
3931 /* Parse a .rel31 directive. */
3934 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3941 if (*input_line_pointer
== '1')
3942 highbit
= 0x80000000;
3943 else if (*input_line_pointer
!= '0')
3944 as_bad (_("expected 0 or 1"));
3946 input_line_pointer
++;
3947 if (*input_line_pointer
!= ',')
3948 as_bad (_("missing comma"));
3949 input_line_pointer
++;
3951 #ifdef md_flush_pending_output
3952 md_flush_pending_output ();
3955 #ifdef md_cons_align
3959 mapping_state (MAP_DATA
);
3964 md_number_to_chars (p
, highbit
, 4);
3965 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3966 BFD_RELOC_ARM_PREL31
);
3968 demand_empty_rest_of_line ();
3971 /* Directives: AEABI stack-unwind tables. */
3973 /* Parse an unwind_fnstart directive. Simply records the current location. */
3976 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3978 demand_empty_rest_of_line ();
3979 if (unwind
.proc_start
)
3981 as_bad (_("duplicate .fnstart directive"));
3985 /* Mark the start of the function. */
3986 unwind
.proc_start
= expr_build_dot ();
3988 /* Reset the rest of the unwind info. */
3989 unwind
.opcode_count
= 0;
3990 unwind
.table_entry
= NULL
;
3991 unwind
.personality_routine
= NULL
;
3992 unwind
.personality_index
= -1;
3993 unwind
.frame_size
= 0;
3994 unwind
.fp_offset
= 0;
3995 unwind
.fp_reg
= REG_SP
;
3997 unwind
.sp_restored
= 0;
4001 /* Parse a handlerdata directive. Creates the exception handling table entry
4002 for the function. */
4005 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
4007 demand_empty_rest_of_line ();
4008 if (!unwind
.proc_start
)
4009 as_bad (MISSING_FNSTART
);
4011 if (unwind
.table_entry
)
4012 as_bad (_("duplicate .handlerdata directive"));
4014 create_unwind_entry (1);
4017 /* Parse an unwind_fnend directive. Generates the index table entry. */
4020 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
4025 unsigned int marked_pr_dependency
;
4027 demand_empty_rest_of_line ();
4029 if (!unwind
.proc_start
)
4031 as_bad (_(".fnend directive without .fnstart"));
4035 /* Add eh table entry. */
4036 if (unwind
.table_entry
== NULL
)
4037 val
= create_unwind_entry (0);
4041 /* Add index table entry. This is two words. */
4042 start_unwind_section (unwind
.saved_seg
, 1);
4043 frag_align (2, 0, 0);
4044 record_alignment (now_seg
, 2);
4046 ptr
= frag_more (8);
4048 where
= frag_now_fix () - 8;
4050 /* Self relative offset of the function start. */
4051 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
4052 BFD_RELOC_ARM_PREL31
);
4054 /* Indicate dependency on EHABI-defined personality routines to the
4055 linker, if it hasn't been done already. */
4056 marked_pr_dependency
4057 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
4058 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
4059 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
4061 static const char *const name
[] =
4063 "__aeabi_unwind_cpp_pr0",
4064 "__aeabi_unwind_cpp_pr1",
4065 "__aeabi_unwind_cpp_pr2"
4067 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
4068 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
4069 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
4070 |= 1 << unwind
.personality_index
;
4074 /* Inline exception table entry. */
4075 md_number_to_chars (ptr
+ 4, val
, 4);
4077 /* Self relative offset of the table entry. */
4078 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
4079 BFD_RELOC_ARM_PREL31
);
4081 /* Restore the original section. */
4082 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
4084 unwind
.proc_start
= NULL
;
4088 /* Parse an unwind_cantunwind directive. */
4091 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4093 demand_empty_rest_of_line ();
4094 if (!unwind
.proc_start
)
4095 as_bad (MISSING_FNSTART
);
4097 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4098 as_bad (_("personality routine specified for cantunwind frame"));
4100 unwind
.personality_index
= -2;
4104 /* Parse a personalityindex directive. */
4107 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4111 if (!unwind
.proc_start
)
4112 as_bad (MISSING_FNSTART
);
4114 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4115 as_bad (_("duplicate .personalityindex directive"));
4119 if (exp
.X_op
!= O_constant
4120 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4122 as_bad (_("bad personality routine number"));
4123 ignore_rest_of_line ();
4127 unwind
.personality_index
= exp
.X_add_number
;
4129 demand_empty_rest_of_line ();
4133 /* Parse a personality directive. */
4136 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4140 if (!unwind
.proc_start
)
4141 as_bad (MISSING_FNSTART
);
4143 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4144 as_bad (_("duplicate .personality directive"));
4146 c
= get_symbol_name (& name
);
4147 p
= input_line_pointer
;
4149 ++ input_line_pointer
;
4150 unwind
.personality_routine
= symbol_find_or_make (name
);
4152 demand_empty_rest_of_line ();
4156 /* Parse a directive saving core registers. */
4159 s_arm_unwind_save_core (void)
4165 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4168 as_bad (_("expected register list"));
4169 ignore_rest_of_line ();
4173 demand_empty_rest_of_line ();
4175 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4176 into .unwind_save {..., sp...}. We aren't bothered about the value of
4177 ip because it is clobbered by calls. */
4178 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4179 && (range
& 0x3000) == 0x1000)
4181 unwind
.opcode_count
--;
4182 unwind
.sp_restored
= 0;
4183 range
= (range
| 0x2000) & ~0x1000;
4184 unwind
.pending_offset
= 0;
4190 /* See if we can use the short opcodes. These pop a block of up to 8
4191 registers starting with r4, plus maybe r14. */
4192 for (n
= 0; n
< 8; n
++)
4194 /* Break at the first non-saved register. */
4195 if ((range
& (1 << (n
+ 4))) == 0)
4198 /* See if there are any other bits set. */
4199 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4201 /* Use the long form. */
4202 op
= 0x8000 | ((range
>> 4) & 0xfff);
4203 add_unwind_opcode (op
, 2);
4207 /* Use the short form. */
4209 op
= 0xa8; /* Pop r14. */
4211 op
= 0xa0; /* Do not pop r14. */
4213 add_unwind_opcode (op
, 1);
4220 op
= 0xb100 | (range
& 0xf);
4221 add_unwind_opcode (op
, 2);
4224 /* Record the number of bytes pushed. */
4225 for (n
= 0; n
< 16; n
++)
4227 if (range
& (1 << n
))
4228 unwind
.frame_size
+= 4;
4233 /* Parse a directive saving FPA registers. */
4236 s_arm_unwind_save_fpa (int reg
)
4242 /* Get Number of registers to transfer. */
4243 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4246 exp
.X_op
= O_illegal
;
4248 if (exp
.X_op
!= O_constant
)
4250 as_bad (_("expected , <constant>"));
4251 ignore_rest_of_line ();
4255 num_regs
= exp
.X_add_number
;
4257 if (num_regs
< 1 || num_regs
> 4)
4259 as_bad (_("number of registers must be in the range [1:4]"));
4260 ignore_rest_of_line ();
4264 demand_empty_rest_of_line ();
4269 op
= 0xb4 | (num_regs
- 1);
4270 add_unwind_opcode (op
, 1);
4275 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4276 add_unwind_opcode (op
, 2);
4278 unwind
.frame_size
+= num_regs
* 12;
4282 /* Parse a directive saving VFP registers for ARMv6 and above. */
4285 s_arm_unwind_save_vfp_armv6 (void)
4290 int num_vfpv3_regs
= 0;
4291 int num_regs_below_16
;
4292 bfd_boolean partial_match
;
4294 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4298 as_bad (_("expected register list"));
4299 ignore_rest_of_line ();
4303 demand_empty_rest_of_line ();
4305 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4306 than FSTMX/FLDMX-style ones). */
4308 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4310 num_vfpv3_regs
= count
;
4311 else if (start
+ count
> 16)
4312 num_vfpv3_regs
= start
+ count
- 16;
4314 if (num_vfpv3_regs
> 0)
4316 int start_offset
= start
> 16 ? start
- 16 : 0;
4317 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4318 add_unwind_opcode (op
, 2);
4321 /* Generate opcode for registers numbered in the range 0 .. 15. */
4322 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4323 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4324 if (num_regs_below_16
> 0)
4326 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4327 add_unwind_opcode (op
, 2);
4330 unwind
.frame_size
+= count
* 8;
4334 /* Parse a directive saving VFP registers for pre-ARMv6. */
4337 s_arm_unwind_save_vfp (void)
4342 bfd_boolean partial_match
;
4344 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4348 as_bad (_("expected register list"));
4349 ignore_rest_of_line ();
4353 demand_empty_rest_of_line ();
4358 op
= 0xb8 | (count
- 1);
4359 add_unwind_opcode (op
, 1);
4364 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4365 add_unwind_opcode (op
, 2);
4367 unwind
.frame_size
+= count
* 8 + 4;
4371 /* Parse a directive saving iWMMXt data registers. */
4374 s_arm_unwind_save_mmxwr (void)
4382 if (*input_line_pointer
== '{')
4383 input_line_pointer
++;
4387 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4391 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4396 as_tsktsk (_("register list not in ascending order"));
4399 if (*input_line_pointer
== '-')
4401 input_line_pointer
++;
4402 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4405 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4408 else if (reg
>= hi_reg
)
4410 as_bad (_("bad register range"));
4413 for (; reg
< hi_reg
; reg
++)
4417 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4419 skip_past_char (&input_line_pointer
, '}');
4421 demand_empty_rest_of_line ();
4423 /* Generate any deferred opcodes because we're going to be looking at
4425 flush_pending_unwind ();
4427 for (i
= 0; i
< 16; i
++)
4429 if (mask
& (1 << i
))
4430 unwind
.frame_size
+= 8;
4433 /* Attempt to combine with a previous opcode. We do this because gcc
4434 likes to output separate unwind directives for a single block of
4436 if (unwind
.opcode_count
> 0)
4438 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4439 if ((i
& 0xf8) == 0xc0)
4442 /* Only merge if the blocks are contiguous. */
4445 if ((mask
& 0xfe00) == (1 << 9))
4447 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4448 unwind
.opcode_count
--;
4451 else if (i
== 6 && unwind
.opcode_count
>= 2)
4453 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4457 op
= 0xffff << (reg
- 1);
4459 && ((mask
& op
) == (1u << (reg
- 1))))
4461 op
= (1 << (reg
+ i
+ 1)) - 1;
4462 op
&= ~((1 << reg
) - 1);
4464 unwind
.opcode_count
-= 2;
4471 /* We want to generate opcodes in the order the registers have been
4472 saved, ie. descending order. */
4473 for (reg
= 15; reg
>= -1; reg
--)
4475 /* Save registers in blocks. */
4477 || !(mask
& (1 << reg
)))
4479 /* We found an unsaved reg. Generate opcodes to save the
4486 op
= 0xc0 | (hi_reg
- 10);
4487 add_unwind_opcode (op
, 1);
4492 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4493 add_unwind_opcode (op
, 2);
4502 ignore_rest_of_line ();
4506 s_arm_unwind_save_mmxwcg (void)
4513 if (*input_line_pointer
== '{')
4514 input_line_pointer
++;
4516 skip_whitespace (input_line_pointer
);
4520 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4524 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4530 as_tsktsk (_("register list not in ascending order"));
4533 if (*input_line_pointer
== '-')
4535 input_line_pointer
++;
4536 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4539 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4542 else if (reg
>= hi_reg
)
4544 as_bad (_("bad register range"));
4547 for (; reg
< hi_reg
; reg
++)
4551 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4553 skip_past_char (&input_line_pointer
, '}');
4555 demand_empty_rest_of_line ();
4557 /* Generate any deferred opcodes because we're going to be looking at
4559 flush_pending_unwind ();
4561 for (reg
= 0; reg
< 16; reg
++)
4563 if (mask
& (1 << reg
))
4564 unwind
.frame_size
+= 4;
4567 add_unwind_opcode (op
, 2);
4570 ignore_rest_of_line ();
4574 /* Parse an unwind_save directive.
4575 If the argument is non-zero, this is a .vsave directive. */
4578 s_arm_unwind_save (int arch_v6
)
4581 struct reg_entry
*reg
;
4582 bfd_boolean had_brace
= FALSE
;
4584 if (!unwind
.proc_start
)
4585 as_bad (MISSING_FNSTART
);
4587 /* Figure out what sort of save we have. */
4588 peek
= input_line_pointer
;
4596 reg
= arm_reg_parse_multi (&peek
);
4600 as_bad (_("register expected"));
4601 ignore_rest_of_line ();
4610 as_bad (_("FPA .unwind_save does not take a register list"));
4611 ignore_rest_of_line ();
4614 input_line_pointer
= peek
;
4615 s_arm_unwind_save_fpa (reg
->number
);
4619 s_arm_unwind_save_core ();
4624 s_arm_unwind_save_vfp_armv6 ();
4626 s_arm_unwind_save_vfp ();
4629 case REG_TYPE_MMXWR
:
4630 s_arm_unwind_save_mmxwr ();
4633 case REG_TYPE_MMXWCG
:
4634 s_arm_unwind_save_mmxwcg ();
4638 as_bad (_(".unwind_save does not support this kind of register"));
4639 ignore_rest_of_line ();
4644 /* Parse an unwind_movsp directive. */
4647 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4653 if (!unwind
.proc_start
)
4654 as_bad (MISSING_FNSTART
);
4656 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4659 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4660 ignore_rest_of_line ();
4664 /* Optional constant. */
4665 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4667 if (immediate_for_directive (&offset
) == FAIL
)
4673 demand_empty_rest_of_line ();
4675 if (reg
== REG_SP
|| reg
== REG_PC
)
4677 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4681 if (unwind
.fp_reg
!= REG_SP
)
4682 as_bad (_("unexpected .unwind_movsp directive"));
4684 /* Generate opcode to restore the value. */
4686 add_unwind_opcode (op
, 1);
4688 /* Record the information for later. */
4689 unwind
.fp_reg
= reg
;
4690 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4691 unwind
.sp_restored
= 1;
4694 /* Parse an unwind_pad directive. */
4697 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4701 if (!unwind
.proc_start
)
4702 as_bad (MISSING_FNSTART
);
4704 if (immediate_for_directive (&offset
) == FAIL
)
4709 as_bad (_("stack increment must be multiple of 4"));
4710 ignore_rest_of_line ();
4714 /* Don't generate any opcodes, just record the details for later. */
4715 unwind
.frame_size
+= offset
;
4716 unwind
.pending_offset
+= offset
;
4718 demand_empty_rest_of_line ();
4721 /* Parse an unwind_setfp directive. */
4724 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4730 if (!unwind
.proc_start
)
4731 as_bad (MISSING_FNSTART
);
4733 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4734 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4737 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4739 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4741 as_bad (_("expected <reg>, <reg>"));
4742 ignore_rest_of_line ();
4746 /* Optional constant. */
4747 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4749 if (immediate_for_directive (&offset
) == FAIL
)
4755 demand_empty_rest_of_line ();
4757 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4759 as_bad (_("register must be either sp or set by a previous"
4760 "unwind_movsp directive"));
4764 /* Don't generate any opcodes, just record the information for later. */
4765 unwind
.fp_reg
= fp_reg
;
4767 if (sp_reg
== REG_SP
)
4768 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4770 unwind
.fp_offset
-= offset
;
4773 /* Parse an unwind_raw directive. */
4776 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4779 /* This is an arbitrary limit. */
4780 unsigned char op
[16];
4783 if (!unwind
.proc_start
)
4784 as_bad (MISSING_FNSTART
);
4787 if (exp
.X_op
== O_constant
4788 && skip_past_comma (&input_line_pointer
) != FAIL
)
4790 unwind
.frame_size
+= exp
.X_add_number
;
4794 exp
.X_op
= O_illegal
;
4796 if (exp
.X_op
!= O_constant
)
4798 as_bad (_("expected <offset>, <opcode>"));
4799 ignore_rest_of_line ();
4805 /* Parse the opcode. */
4810 as_bad (_("unwind opcode too long"));
4811 ignore_rest_of_line ();
4813 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4815 as_bad (_("invalid unwind opcode"));
4816 ignore_rest_of_line ();
4819 op
[count
++] = exp
.X_add_number
;
4821 /* Parse the next byte. */
4822 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4828 /* Add the opcode bytes in reverse order. */
4830 add_unwind_opcode (op
[count
], 1);
4832 demand_empty_rest_of_line ();
4836 /* Parse a .eabi_attribute directive. */
4839 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4841 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4843 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4844 attributes_set_explicitly
[tag
] = 1;
4847 /* Emit a tls fix for the symbol. */
4850 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4854 #ifdef md_flush_pending_output
4855 md_flush_pending_output ();
4858 #ifdef md_cons_align
4862 /* Since we're just labelling the code, there's no need to define a
4865 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4866 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4867 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4868 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4870 #endif /* OBJ_ELF */
4872 static void s_arm_arch (int);
4873 static void s_arm_object_arch (int);
4874 static void s_arm_cpu (int);
4875 static void s_arm_fpu (int);
4876 static void s_arm_arch_extension (int);
4881 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4888 if (exp
.X_op
== O_symbol
)
4889 exp
.X_op
= O_secrel
;
4891 emit_expr (&exp
, 4);
4893 while (*input_line_pointer
++ == ',');
4895 input_line_pointer
--;
4896 demand_empty_rest_of_line ();
4900 /* This table describes all the machine specific pseudo-ops the assembler
4901 has to support. The fields are:
4902 pseudo-op name without dot
4903 function to call to execute this pseudo-op
4904 Integer arg to pass to the function. */
4906 const pseudo_typeS md_pseudo_table
[] =
4908 /* Never called because '.req' does not start a line. */
4909 { "req", s_req
, 0 },
4910 /* Following two are likewise never called. */
4913 { "unreq", s_unreq
, 0 },
4914 { "bss", s_bss
, 0 },
4915 { "align", s_align_ptwo
, 2 },
4916 { "arm", s_arm
, 0 },
4917 { "thumb", s_thumb
, 0 },
4918 { "code", s_code
, 0 },
4919 { "force_thumb", s_force_thumb
, 0 },
4920 { "thumb_func", s_thumb_func
, 0 },
4921 { "thumb_set", s_thumb_set
, 0 },
4922 { "even", s_even
, 0 },
4923 { "ltorg", s_ltorg
, 0 },
4924 { "pool", s_ltorg
, 0 },
4925 { "syntax", s_syntax
, 0 },
4926 { "cpu", s_arm_cpu
, 0 },
4927 { "arch", s_arm_arch
, 0 },
4928 { "object_arch", s_arm_object_arch
, 0 },
4929 { "fpu", s_arm_fpu
, 0 },
4930 { "arch_extension", s_arm_arch_extension
, 0 },
4932 { "word", s_arm_elf_cons
, 4 },
4933 { "long", s_arm_elf_cons
, 4 },
4934 { "inst.n", s_arm_elf_inst
, 2 },
4935 { "inst.w", s_arm_elf_inst
, 4 },
4936 { "inst", s_arm_elf_inst
, 0 },
4937 { "rel31", s_arm_rel31
, 0 },
4938 { "fnstart", s_arm_unwind_fnstart
, 0 },
4939 { "fnend", s_arm_unwind_fnend
, 0 },
4940 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4941 { "personality", s_arm_unwind_personality
, 0 },
4942 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4943 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4944 { "save", s_arm_unwind_save
, 0 },
4945 { "vsave", s_arm_unwind_save
, 1 },
4946 { "movsp", s_arm_unwind_movsp
, 0 },
4947 { "pad", s_arm_unwind_pad
, 0 },
4948 { "setfp", s_arm_unwind_setfp
, 0 },
4949 { "unwind_raw", s_arm_unwind_raw
, 0 },
4950 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4951 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4955 /* These are used for dwarf. */
4959 /* These are used for dwarf2. */
4960 { "file", dwarf2_directive_file
, 0 },
4961 { "loc", dwarf2_directive_loc
, 0 },
4962 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4964 { "extend", float_cons
, 'x' },
4965 { "ldouble", float_cons
, 'x' },
4966 { "packed", float_cons
, 'p' },
4968 {"secrel32", pe_directive_secrel
, 0},
4971 /* These are for compatibility with CodeComposer Studio. */
4972 {"ref", s_ccs_ref
, 0},
4973 {"def", s_ccs_def
, 0},
4974 {"asmfunc", s_ccs_asmfunc
, 0},
4975 {"endasmfunc", s_ccs_endasmfunc
, 0},
4980 /* Parser functions used exclusively in instruction operands. */
4982 /* Generic immediate-value read function for use in insn parsing.
4983 STR points to the beginning of the immediate (the leading #);
4984 VAL receives the value; if the value is outside [MIN, MAX]
4985 issue an error. PREFIX_OPT is true if the immediate prefix is
4989 parse_immediate (char **str
, int *val
, int min
, int max
,
4990 bfd_boolean prefix_opt
)
4994 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4995 if (exp
.X_op
!= O_constant
)
4997 inst
.error
= _("constant expression required");
5001 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
5003 inst
.error
= _("immediate value out of range");
5007 *val
= exp
.X_add_number
;
5011 /* Less-generic immediate-value read function with the possibility of loading a
5012 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5013 instructions. Puts the result directly in inst.operands[i]. */
5016 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
5017 bfd_boolean allow_symbol_p
)
5020 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
5023 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
5025 if (exp_p
->X_op
== O_constant
)
5027 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
5028 /* If we're on a 64-bit host, then a 64-bit number can be returned using
5029 O_constant. We have to be careful not to break compilation for
5030 32-bit X_add_number, though. */
5031 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
5033 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
5034 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
5036 inst
.operands
[i
].regisimm
= 1;
5039 else if (exp_p
->X_op
== O_big
5040 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
5042 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
5044 /* Bignums have their least significant bits in
5045 generic_bignum[0]. Make sure we put 32 bits in imm and
5046 32 bits in reg, in a (hopefully) portable way. */
5047 gas_assert (parts
!= 0);
5049 /* Make sure that the number is not too big.
5050 PR 11972: Bignums can now be sign-extended to the
5051 size of a .octa so check that the out of range bits
5052 are all zero or all one. */
5053 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
5055 LITTLENUM_TYPE m
= -1;
5057 if (generic_bignum
[parts
* 2] != 0
5058 && generic_bignum
[parts
* 2] != m
)
5061 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
5062 if (generic_bignum
[j
] != generic_bignum
[j
-1])
5066 inst
.operands
[i
].imm
= 0;
5067 for (j
= 0; j
< parts
; j
++, idx
++)
5068 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
5069 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5070 inst
.operands
[i
].reg
= 0;
5071 for (j
= 0; j
< parts
; j
++, idx
++)
5072 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
5073 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5074 inst
.operands
[i
].regisimm
= 1;
5076 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
5084 /* Returns the pseudo-register number of an FPA immediate constant,
5085 or FAIL if there isn't a valid constant here. */
5088 parse_fpa_immediate (char ** str
)
5090 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5096 /* First try and match exact strings, this is to guarantee
5097 that some formats will work even for cross assembly. */
5099 for (i
= 0; fp_const
[i
]; i
++)
5101 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5105 *str
+= strlen (fp_const
[i
]);
5106 if (is_end_of_line
[(unsigned char) **str
])
5112 /* Just because we didn't get a match doesn't mean that the constant
5113 isn't valid, just that it is in a format that we don't
5114 automatically recognize. Try parsing it with the standard
5115 expression routines. */
5117 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5119 /* Look for a raw floating point number. */
5120 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5121 && is_end_of_line
[(unsigned char) *save_in
])
5123 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5125 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5127 if (words
[j
] != fp_values
[i
][j
])
5131 if (j
== MAX_LITTLENUMS
)
5139 /* Try and parse a more complex expression, this will probably fail
5140 unless the code uses a floating point prefix (eg "0f"). */
5141 save_in
= input_line_pointer
;
5142 input_line_pointer
= *str
;
5143 if (expression (&exp
) == absolute_section
5144 && exp
.X_op
== O_big
5145 && exp
.X_add_number
< 0)
5147 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5149 #define X_PRECISION 5
5150 #define E_PRECISION 15L
5151 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5153 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5155 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5157 if (words
[j
] != fp_values
[i
][j
])
5161 if (j
== MAX_LITTLENUMS
)
5163 *str
= input_line_pointer
;
5164 input_line_pointer
= save_in
;
5171 *str
= input_line_pointer
;
5172 input_line_pointer
= save_in
;
5173 inst
.error
= _("invalid FPA immediate expression");
5177 /* Returns 1 if a number has "quarter-precision" float format
5178 0baBbbbbbc defgh000 00000000 00000000. */
5181 is_quarter_float (unsigned imm
)
5183 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
5184 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
5188 /* Detect the presence of a floating point or integer zero constant,
5192 parse_ifimm_zero (char **in
)
5196 if (!is_immediate_prefix (**in
))
5198 /* In unified syntax, all prefixes are optional. */
5199 if (!unified_syntax
)
5205 /* Accept #0x0 as a synonym for #0. */
5206 if (strncmp (*in
, "0x", 2) == 0)
5209 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5214 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5215 &generic_floating_point_number
);
5218 && generic_floating_point_number
.sign
== '+'
5219 && (generic_floating_point_number
.low
5220 > generic_floating_point_number
.leader
))
5226 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5227 0baBbbbbbc defgh000 00000000 00000000.
5228 The zero and minus-zero cases need special handling, since they can't be
5229 encoded in the "quarter-precision" float format, but can nonetheless be
5230 loaded as integer constants. */
5233 parse_qfloat_immediate (char **ccp
, int *immed
)
5237 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5238 int found_fpchar
= 0;
5240 skip_past_char (&str
, '#');
5242 /* We must not accidentally parse an integer as a floating-point number. Make
5243 sure that the value we parse is not an integer by checking for special
5244 characters '.' or 'e'.
5245 FIXME: This is a horrible hack, but doing better is tricky because type
5246 information isn't in a very usable state at parse time. */
5248 skip_whitespace (fpnum
);
5250 if (strncmp (fpnum
, "0x", 2) == 0)
5254 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5255 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5265 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5267 unsigned fpword
= 0;
5270 /* Our FP word must be 32 bits (single-precision FP). */
5271 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5273 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5277 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5290 /* Shift operands. */
5293 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
, SHIFT_UXTW
5296 struct asm_shift_name
5299 enum shift_kind kind
;
5302 /* Third argument to parse_shift. */
5303 enum parse_shift_mode
5305 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5306 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5307 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5308 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5309 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5310 SHIFT_UXTW_IMMEDIATE
/* Shift must be UXTW immediate. */
5313 /* Parse a <shift> specifier on an ARM data processing instruction.
5314 This has three forms:
5316 (LSL|LSR|ASL|ASR|ROR) Rs
5317 (LSL|LSR|ASL|ASR|ROR) #imm
5320 Note that ASL is assimilated to LSL in the instruction encoding, and
5321 RRX to ROR #0 (which cannot be written as such). */
5324 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5326 const struct asm_shift_name
*shift_name
;
5327 enum shift_kind shift
;
5332 for (p
= *str
; ISALPHA (*p
); p
++)
5337 inst
.error
= _("shift expression expected");
5341 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5344 if (shift_name
== NULL
)
5346 inst
.error
= _("shift expression expected");
5350 shift
= shift_name
->kind
;
5354 case NO_SHIFT_RESTRICT
:
5355 case SHIFT_IMMEDIATE
:
5356 if (shift
== SHIFT_UXTW
)
5358 inst
.error
= _("'UXTW' not allowed here");
5363 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5364 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5366 inst
.error
= _("'LSL' or 'ASR' required");
5371 case SHIFT_LSL_IMMEDIATE
:
5372 if (shift
!= SHIFT_LSL
)
5374 inst
.error
= _("'LSL' required");
5379 case SHIFT_ASR_IMMEDIATE
:
5380 if (shift
!= SHIFT_ASR
)
5382 inst
.error
= _("'ASR' required");
5386 case SHIFT_UXTW_IMMEDIATE
:
5387 if (shift
!= SHIFT_UXTW
)
5389 inst
.error
= _("'UXTW' required");
5397 if (shift
!= SHIFT_RRX
)
5399 /* Whitespace can appear here if the next thing is a bare digit. */
5400 skip_whitespace (p
);
5402 if (mode
== NO_SHIFT_RESTRICT
5403 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5405 inst
.operands
[i
].imm
= reg
;
5406 inst
.operands
[i
].immisreg
= 1;
5408 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5411 inst
.operands
[i
].shift_kind
= shift
;
5412 inst
.operands
[i
].shifted
= 1;
5417 /* Parse a <shifter_operand> for an ARM data processing instruction:
5420 #<immediate>, <rotate>
5424 where <shift> is defined by parse_shift above, and <rotate> is a
5425 multiple of 2 between 0 and 30. Validation of immediate operands
5426 is deferred to md_apply_fix. */
5429 parse_shifter_operand (char **str
, int i
)
5434 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5436 inst
.operands
[i
].reg
= value
;
5437 inst
.operands
[i
].isreg
= 1;
5439 /* parse_shift will override this if appropriate */
5440 inst
.relocs
[0].exp
.X_op
= O_constant
;
5441 inst
.relocs
[0].exp
.X_add_number
= 0;
5443 if (skip_past_comma (str
) == FAIL
)
5446 /* Shift operation on register. */
5447 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5450 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5453 if (skip_past_comma (str
) == SUCCESS
)
5455 /* #x, y -- ie explicit rotation by Y. */
5456 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5459 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5461 inst
.error
= _("constant expression expected");
5465 value
= exp
.X_add_number
;
5466 if (value
< 0 || value
> 30 || value
% 2 != 0)
5468 inst
.error
= _("invalid rotation");
5471 if (inst
.relocs
[0].exp
.X_add_number
< 0
5472 || inst
.relocs
[0].exp
.X_add_number
> 255)
5474 inst
.error
= _("invalid constant");
5478 /* Encode as specified. */
5479 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5483 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5484 inst
.relocs
[0].pc_rel
= 0;
5488 /* Group relocation information. Each entry in the table contains the
5489 textual name of the relocation as may appear in assembler source
5490 and must end with a colon.
5491 Along with this textual name are the relocation codes to be used if
5492 the corresponding instruction is an ALU instruction (ADD or SUB only),
5493 an LDR, an LDRS, or an LDC. */
5495 struct group_reloc_table_entry
5506 /* Varieties of non-ALU group relocation. */
5514 static struct group_reloc_table_entry group_reloc_table
[] =
5515 { /* Program counter relative: */
5517 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5522 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5523 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5524 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5525 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5527 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5532 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5533 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5534 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5535 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5537 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5538 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5539 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5540 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5541 /* Section base relative */
5543 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5548 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5549 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5550 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5551 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5553 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5558 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5559 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5560 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5561 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5563 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5564 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5565 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5566 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5567 /* Absolute thumb alu relocations. */
5569 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5574 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5579 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5584 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5589 /* Given the address of a pointer pointing to the textual name of a group
5590 relocation as may appear in assembler source, attempt to find its details
5591 in group_reloc_table. The pointer will be updated to the character after
5592 the trailing colon. On failure, FAIL will be returned; SUCCESS
5593 otherwise. On success, *entry will be updated to point at the relevant
5594 group_reloc_table entry. */
5597 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5600 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5602 int length
= strlen (group_reloc_table
[i
].name
);
5604 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5605 && (*str
)[length
] == ':')
5607 *out
= &group_reloc_table
[i
];
5608 *str
+= (length
+ 1);
5616 /* Parse a <shifter_operand> for an ARM data processing instruction
5617 (as for parse_shifter_operand) where group relocations are allowed:
5620 #<immediate>, <rotate>
5621 #:<group_reloc>:<expression>
5625 where <group_reloc> is one of the strings defined in group_reloc_table.
5626 The hashes are optional.
5628 Everything else is as for parse_shifter_operand. */
5630 static parse_operand_result
5631 parse_shifter_operand_group_reloc (char **str
, int i
)
5633 /* Determine if we have the sequence of characters #: or just :
5634 coming next. If we do, then we check for a group relocation.
5635 If we don't, punt the whole lot to parse_shifter_operand. */
5637 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5638 || (*str
)[0] == ':')
5640 struct group_reloc_table_entry
*entry
;
5642 if ((*str
)[0] == '#')
5647 /* Try to parse a group relocation. Anything else is an error. */
5648 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5650 inst
.error
= _("unknown group relocation");
5651 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5654 /* We now have the group relocation table entry corresponding to
5655 the name in the assembler source. Next, we parse the expression. */
5656 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5657 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5659 /* Record the relocation type (always the ALU variant here). */
5660 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5661 gas_assert (inst
.relocs
[0].type
!= 0);
5663 return PARSE_OPERAND_SUCCESS
;
5666 return parse_shifter_operand (str
, i
) == SUCCESS
5667 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5669 /* Never reached. */
5672 /* Parse a Neon alignment expression. Information is written to
5673 inst.operands[i]. We assume the initial ':' has been skipped.
5675 align .imm = align << 8, .immisalign=1, .preind=0 */
5676 static parse_operand_result
5677 parse_neon_alignment (char **str
, int i
)
5682 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5684 if (exp
.X_op
!= O_constant
)
5686 inst
.error
= _("alignment must be constant");
5687 return PARSE_OPERAND_FAIL
;
5690 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5691 inst
.operands
[i
].immisalign
= 1;
5692 /* Alignments are not pre-indexes. */
5693 inst
.operands
[i
].preind
= 0;
5696 return PARSE_OPERAND_SUCCESS
;
5699 /* Parse all forms of an ARM address expression. Information is written
5700 to inst.operands[i] and/or inst.relocs[0].
5702 Preindexed addressing (.preind=1):
5704 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5705 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5706 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5707 .shift_kind=shift .relocs[0].exp=shift_imm
5709 These three may have a trailing ! which causes .writeback to be set also.
5711 Postindexed addressing (.postind=1, .writeback=1):
5713 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5714 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5715 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5716 .shift_kind=shift .relocs[0].exp=shift_imm
5718 Unindexed addressing (.preind=0, .postind=0):
5720 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5724 [Rn]{!} shorthand for [Rn,#0]{!}
5725 =immediate .isreg=0 .relocs[0].exp=immediate
5726 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5728 It is the caller's responsibility to check for addressing modes not
5729 supported by the instruction, and to set inst.relocs[0].type. */
5731 static parse_operand_result
5732 parse_address_main (char **str
, int i
, int group_relocations
,
5733 group_reloc_type group_type
)
5738 if (skip_past_char (&p
, '[') == FAIL
)
5740 if (skip_past_char (&p
, '=') == FAIL
)
5742 /* Bare address - translate to PC-relative offset. */
5743 inst
.relocs
[0].pc_rel
= 1;
5744 inst
.operands
[i
].reg
= REG_PC
;
5745 inst
.operands
[i
].isreg
= 1;
5746 inst
.operands
[i
].preind
= 1;
5748 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5749 return PARSE_OPERAND_FAIL
;
5751 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5752 /*allow_symbol_p=*/TRUE
))
5753 return PARSE_OPERAND_FAIL
;
5756 return PARSE_OPERAND_SUCCESS
;
5759 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5760 skip_whitespace (p
);
5762 if (group_type
== GROUP_MVE
)
5764 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5765 struct neon_type_el et
;
5766 if ((reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5768 inst
.operands
[i
].isquad
= 1;
5770 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5772 inst
.error
= BAD_ADDR_MODE
;
5773 return PARSE_OPERAND_FAIL
;
5776 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5778 if (group_type
== GROUP_MVE
)
5779 inst
.error
= BAD_ADDR_MODE
;
5781 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5782 return PARSE_OPERAND_FAIL
;
5784 inst
.operands
[i
].reg
= reg
;
5785 inst
.operands
[i
].isreg
= 1;
5787 if (skip_past_comma (&p
) == SUCCESS
)
5789 inst
.operands
[i
].preind
= 1;
5792 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5794 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5795 struct neon_type_el et
;
5796 if (group_type
== GROUP_MVE
5797 && (reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5799 inst
.operands
[i
].immisreg
= 2;
5800 inst
.operands
[i
].imm
= reg
;
5802 if (skip_past_comma (&p
) == SUCCESS
)
5804 if (parse_shift (&p
, i
, SHIFT_UXTW_IMMEDIATE
) == SUCCESS
)
5806 inst
.operands
[i
].imm
|= inst
.relocs
[0].exp
.X_add_number
<< 5;
5807 inst
.relocs
[0].exp
.X_add_number
= 0;
5810 return PARSE_OPERAND_FAIL
;
5813 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5815 inst
.operands
[i
].imm
= reg
;
5816 inst
.operands
[i
].immisreg
= 1;
5818 if (skip_past_comma (&p
) == SUCCESS
)
5819 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5820 return PARSE_OPERAND_FAIL
;
5822 else if (skip_past_char (&p
, ':') == SUCCESS
)
5824 /* FIXME: '@' should be used here, but it's filtered out by generic
5825 code before we get to see it here. This may be subject to
5827 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5829 if (result
!= PARSE_OPERAND_SUCCESS
)
5834 if (inst
.operands
[i
].negative
)
5836 inst
.operands
[i
].negative
= 0;
5840 if (group_relocations
5841 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5843 struct group_reloc_table_entry
*entry
;
5845 /* Skip over the #: or : sequence. */
5851 /* Try to parse a group relocation. Anything else is an
5853 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5855 inst
.error
= _("unknown group relocation");
5856 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5859 /* We now have the group relocation table entry corresponding to
5860 the name in the assembler source. Next, we parse the
5862 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5863 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5865 /* Record the relocation type. */
5870 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5875 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5880 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5887 if (inst
.relocs
[0].type
== 0)
5889 inst
.error
= _("this group relocation is not allowed on this instruction");
5890 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5897 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5898 return PARSE_OPERAND_FAIL
;
5899 /* If the offset is 0, find out if it's a +0 or -0. */
5900 if (inst
.relocs
[0].exp
.X_op
== O_constant
5901 && inst
.relocs
[0].exp
.X_add_number
== 0)
5903 skip_whitespace (q
);
5907 skip_whitespace (q
);
5910 inst
.operands
[i
].negative
= 1;
5915 else if (skip_past_char (&p
, ':') == SUCCESS
)
5917 /* FIXME: '@' should be used here, but it's filtered out by generic code
5918 before we get to see it here. This may be subject to change. */
5919 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5921 if (result
!= PARSE_OPERAND_SUCCESS
)
5925 if (skip_past_char (&p
, ']') == FAIL
)
5927 inst
.error
= _("']' expected");
5928 return PARSE_OPERAND_FAIL
;
5931 if (skip_past_char (&p
, '!') == SUCCESS
)
5932 inst
.operands
[i
].writeback
= 1;
5934 else if (skip_past_comma (&p
) == SUCCESS
)
5936 if (skip_past_char (&p
, '{') == SUCCESS
)
5938 /* [Rn], {expr} - unindexed, with option */
5939 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5940 0, 255, TRUE
) == FAIL
)
5941 return PARSE_OPERAND_FAIL
;
5943 if (skip_past_char (&p
, '}') == FAIL
)
5945 inst
.error
= _("'}' expected at end of 'option' field");
5946 return PARSE_OPERAND_FAIL
;
5948 if (inst
.operands
[i
].preind
)
5950 inst
.error
= _("cannot combine index with option");
5951 return PARSE_OPERAND_FAIL
;
5954 return PARSE_OPERAND_SUCCESS
;
5958 inst
.operands
[i
].postind
= 1;
5959 inst
.operands
[i
].writeback
= 1;
5961 if (inst
.operands
[i
].preind
)
5963 inst
.error
= _("cannot combine pre- and post-indexing");
5964 return PARSE_OPERAND_FAIL
;
5968 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5970 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5971 struct neon_type_el et
;
5972 if (group_type
== GROUP_MVE
5973 && (reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5975 inst
.operands
[i
].immisreg
= 2;
5976 inst
.operands
[i
].imm
= reg
;
5978 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5980 /* We might be using the immediate for alignment already. If we
5981 are, OR the register number into the low-order bits. */
5982 if (inst
.operands
[i
].immisalign
)
5983 inst
.operands
[i
].imm
|= reg
;
5985 inst
.operands
[i
].imm
= reg
;
5986 inst
.operands
[i
].immisreg
= 1;
5988 if (skip_past_comma (&p
) == SUCCESS
)
5989 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5990 return PARSE_OPERAND_FAIL
;
5996 if (inst
.operands
[i
].negative
)
5998 inst
.operands
[i
].negative
= 0;
6001 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
6002 return PARSE_OPERAND_FAIL
;
6003 /* If the offset is 0, find out if it's a +0 or -0. */
6004 if (inst
.relocs
[0].exp
.X_op
== O_constant
6005 && inst
.relocs
[0].exp
.X_add_number
== 0)
6007 skip_whitespace (q
);
6011 skip_whitespace (q
);
6014 inst
.operands
[i
].negative
= 1;
6020 /* If at this point neither .preind nor .postind is set, we have a
6021 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
6022 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
6024 inst
.operands
[i
].preind
= 1;
6025 inst
.relocs
[0].exp
.X_op
= O_constant
;
6026 inst
.relocs
[0].exp
.X_add_number
= 0;
6029 return PARSE_OPERAND_SUCCESS
;
6033 parse_address (char **str
, int i
)
6035 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
6039 static parse_operand_result
6040 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
6042 return parse_address_main (str
, i
, 1, type
);
6045 /* Parse an operand for a MOVW or MOVT instruction. */
6047 parse_half (char **str
)
6052 skip_past_char (&p
, '#');
6053 if (strncasecmp (p
, ":lower16:", 9) == 0)
6054 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
6055 else if (strncasecmp (p
, ":upper16:", 9) == 0)
6056 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
6058 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
6061 skip_whitespace (p
);
6064 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
6067 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
6069 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
6071 inst
.error
= _("constant expression expected");
6074 if (inst
.relocs
[0].exp
.X_add_number
< 0
6075 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
6077 inst
.error
= _("immediate value out of range");
6085 /* Miscellaneous. */
6087 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6088 or a bitmask suitable to be or-ed into the ARM msr instruction. */
6090 parse_psr (char **str
, bfd_boolean lhs
)
6093 unsigned long psr_field
;
6094 const struct asm_psr
*psr
;
6096 bfd_boolean is_apsr
= FALSE
;
6097 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
6099 /* PR gas/12698: If the user has specified -march=all then m_profile will
6100 be TRUE, but we want to ignore it in this case as we are building for any
6101 CPU type, including non-m variants. */
6102 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
6105 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
6106 feature for ease of use and backwards compatibility. */
6108 if (strncasecmp (p
, "SPSR", 4) == 0)
6111 goto unsupported_psr
;
6113 psr_field
= SPSR_BIT
;
6115 else if (strncasecmp (p
, "CPSR", 4) == 0)
6118 goto unsupported_psr
;
6122 else if (strncasecmp (p
, "APSR", 4) == 0)
6124 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
6125 and ARMv7-R architecture CPUs. */
6134 while (ISALNUM (*p
) || *p
== '_');
6136 if (strncasecmp (start
, "iapsr", 5) == 0
6137 || strncasecmp (start
, "eapsr", 5) == 0
6138 || strncasecmp (start
, "xpsr", 4) == 0
6139 || strncasecmp (start
, "psr", 3) == 0)
6140 p
= start
+ strcspn (start
, "rR") + 1;
6142 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
6148 /* If APSR is being written, a bitfield may be specified. Note that
6149 APSR itself is handled above. */
6150 if (psr
->field
<= 3)
6152 psr_field
= psr
->field
;
6158 /* M-profile MSR instructions have the mask field set to "10", except
6159 *PSR variants which modify APSR, which may use a different mask (and
6160 have been handled already). Do that by setting the PSR_f field
6162 return psr
->field
| (lhs
? PSR_f
: 0);
6165 goto unsupported_psr
;
6171 /* A suffix follows. */
6177 while (ISALNUM (*p
) || *p
== '_');
6181 /* APSR uses a notation for bits, rather than fields. */
6182 unsigned int nzcvq_bits
= 0;
6183 unsigned int g_bit
= 0;
6186 for (bit
= start
; bit
!= p
; bit
++)
6188 switch (TOLOWER (*bit
))
6191 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6195 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6199 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6203 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6207 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6211 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6215 inst
.error
= _("unexpected bit specified after APSR");
6220 if (nzcvq_bits
== 0x1f)
6225 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6227 inst
.error
= _("selected processor does not "
6228 "support DSP extension");
6235 if ((nzcvq_bits
& 0x20) != 0
6236 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6237 || (g_bit
& 0x2) != 0)
6239 inst
.error
= _("bad bitmask specified after APSR");
6245 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6250 psr_field
|= psr
->field
;
6256 goto error
; /* Garbage after "[CS]PSR". */
6258 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6259 is deprecated, but allow it anyway. */
6263 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6266 else if (!m_profile
)
6267 /* These bits are never right for M-profile devices: don't set them
6268 (only code paths which read/write APSR reach here). */
6269 psr_field
|= (PSR_c
| PSR_f
);
6275 inst
.error
= _("selected processor does not support requested special "
6276 "purpose register");
6280 inst
.error
= _("flag for {c}psr instruction expected");
6285 parse_sys_vldr_vstr (char **str
)
6294 {"FPSCR", 0x1, 0x0},
6295 {"FPSCR_nzcvqc", 0x2, 0x0},
6298 {"FPCXTNS", 0x6, 0x1},
6299 {"FPCXTS", 0x7, 0x1}
6301 char *op_end
= strchr (*str
, ',');
6302 size_t op_strlen
= op_end
- *str
;
6304 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6306 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6308 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6317 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6318 value suitable for splatting into the AIF field of the instruction. */
6321 parse_cps_flags (char **str
)
6330 case '\0': case ',':
6333 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6334 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6335 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6338 inst
.error
= _("unrecognized CPS flag");
6343 if (saw_a_flag
== 0)
6345 inst
.error
= _("missing CPS flags");
6353 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6354 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6357 parse_endian_specifier (char **str
)
6362 if (strncasecmp (s
, "BE", 2))
6364 else if (strncasecmp (s
, "LE", 2))
6368 inst
.error
= _("valid endian specifiers are be or le");
6372 if (ISALNUM (s
[2]) || s
[2] == '_')
6374 inst
.error
= _("valid endian specifiers are be or le");
6379 return little_endian
;
6382 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6383 value suitable for poking into the rotate field of an sxt or sxta
6384 instruction, or FAIL on error. */
6387 parse_ror (char **str
)
6392 if (strncasecmp (s
, "ROR", 3) == 0)
6396 inst
.error
= _("missing rotation field after comma");
6400 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6405 case 0: *str
= s
; return 0x0;
6406 case 8: *str
= s
; return 0x1;
6407 case 16: *str
= s
; return 0x2;
6408 case 24: *str
= s
; return 0x3;
6411 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6416 /* Parse a conditional code (from conds[] below). The value returned is in the
6417 range 0 .. 14, or FAIL. */
6419 parse_cond (char **str
)
6422 const struct asm_cond
*c
;
6424 /* Condition codes are always 2 characters, so matching up to
6425 3 characters is sufficient. */
6430 while (ISALPHA (*q
) && n
< 3)
6432 cond
[n
] = TOLOWER (*q
);
6437 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6440 inst
.error
= _("condition required");
6448 /* Parse an option for a barrier instruction. Returns the encoding for the
6451 parse_barrier (char **str
)
6454 const struct asm_barrier_opt
*o
;
6457 while (ISALPHA (*q
))
6460 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6465 if (!mark_feature_used (&o
->arch
))
6472 /* Parse the operands of a table branch instruction. Similar to a memory
6475 parse_tb (char **str
)
6480 if (skip_past_char (&p
, '[') == FAIL
)
6482 inst
.error
= _("'[' expected");
6486 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6488 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6491 inst
.operands
[0].reg
= reg
;
6493 if (skip_past_comma (&p
) == FAIL
)
6495 inst
.error
= _("',' expected");
6499 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6501 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6504 inst
.operands
[0].imm
= reg
;
6506 if (skip_past_comma (&p
) == SUCCESS
)
6508 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6510 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6512 inst
.error
= _("invalid shift");
6515 inst
.operands
[0].shifted
= 1;
6518 if (skip_past_char (&p
, ']') == FAIL
)
6520 inst
.error
= _("']' expected");
6527 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6528 information on the types the operands can take and how they are encoded.
6529 Up to four operands may be read; this function handles setting the
6530 ".present" field for each read operand itself.
6531 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6532 else returns FAIL. */
6535 parse_neon_mov (char **str
, int *which_operand
)
6537 int i
= *which_operand
, val
;
6538 enum arm_reg_type rtype
;
6540 struct neon_type_el optype
;
6542 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6544 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6545 inst
.operands
[i
].reg
= val
;
6546 inst
.operands
[i
].isscalar
= 1;
6547 inst
.operands
[i
].vectype
= optype
;
6548 inst
.operands
[i
++].present
= 1;
6550 if (skip_past_comma (&ptr
) == FAIL
)
6553 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6556 inst
.operands
[i
].reg
= val
;
6557 inst
.operands
[i
].isreg
= 1;
6558 inst
.operands
[i
].present
= 1;
6560 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6563 /* Cases 0, 1, 2, 3, 5 (D only). */
6564 if (skip_past_comma (&ptr
) == FAIL
)
6567 inst
.operands
[i
].reg
= val
;
6568 inst
.operands
[i
].isreg
= 1;
6569 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6570 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6571 inst
.operands
[i
].isvec
= 1;
6572 inst
.operands
[i
].vectype
= optype
;
6573 inst
.operands
[i
++].present
= 1;
6575 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6577 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6578 Case 13: VMOV <Sd>, <Rm> */
6579 inst
.operands
[i
].reg
= val
;
6580 inst
.operands
[i
].isreg
= 1;
6581 inst
.operands
[i
].present
= 1;
6583 if (rtype
== REG_TYPE_NQ
)
6585 first_error (_("can't use Neon quad register here"));
6588 else if (rtype
!= REG_TYPE_VFS
)
6591 if (skip_past_comma (&ptr
) == FAIL
)
6593 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6595 inst
.operands
[i
].reg
= val
;
6596 inst
.operands
[i
].isreg
= 1;
6597 inst
.operands
[i
].present
= 1;
6600 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6603 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6604 Case 1: VMOV<c><q> <Dd>, <Dm>
6605 Case 8: VMOV.F32 <Sd>, <Sm>
6606 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6608 inst
.operands
[i
].reg
= val
;
6609 inst
.operands
[i
].isreg
= 1;
6610 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6611 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6612 inst
.operands
[i
].isvec
= 1;
6613 inst
.operands
[i
].vectype
= optype
;
6614 inst
.operands
[i
].present
= 1;
6616 if (skip_past_comma (&ptr
) == SUCCESS
)
6621 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6624 inst
.operands
[i
].reg
= val
;
6625 inst
.operands
[i
].isreg
= 1;
6626 inst
.operands
[i
++].present
= 1;
6628 if (skip_past_comma (&ptr
) == FAIL
)
6631 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6634 inst
.operands
[i
].reg
= val
;
6635 inst
.operands
[i
].isreg
= 1;
6636 inst
.operands
[i
].present
= 1;
6639 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6640 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6641 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6642 Case 10: VMOV.F32 <Sd>, #<imm>
6643 Case 11: VMOV.F64 <Dd>, #<imm> */
6644 inst
.operands
[i
].immisfloat
= 1;
6645 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6647 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6648 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6652 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6656 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6659 inst
.operands
[i
].reg
= val
;
6660 inst
.operands
[i
].isreg
= 1;
6661 inst
.operands
[i
++].present
= 1;
6663 if (skip_past_comma (&ptr
) == FAIL
)
6666 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6668 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6669 inst
.operands
[i
].reg
= val
;
6670 inst
.operands
[i
].isscalar
= 1;
6671 inst
.operands
[i
].present
= 1;
6672 inst
.operands
[i
].vectype
= optype
;
6674 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6676 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6677 inst
.operands
[i
].reg
= val
;
6678 inst
.operands
[i
].isreg
= 1;
6679 inst
.operands
[i
++].present
= 1;
6681 if (skip_past_comma (&ptr
) == FAIL
)
6684 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6687 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6691 inst
.operands
[i
].reg
= val
;
6692 inst
.operands
[i
].isreg
= 1;
6693 inst
.operands
[i
].isvec
= 1;
6694 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6695 inst
.operands
[i
].vectype
= optype
;
6696 inst
.operands
[i
].present
= 1;
6698 if (rtype
== REG_TYPE_VFS
)
6702 if (skip_past_comma (&ptr
) == FAIL
)
6704 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6707 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6710 inst
.operands
[i
].reg
= val
;
6711 inst
.operands
[i
].isreg
= 1;
6712 inst
.operands
[i
].isvec
= 1;
6713 inst
.operands
[i
].issingle
= 1;
6714 inst
.operands
[i
].vectype
= optype
;
6715 inst
.operands
[i
].present
= 1;
6718 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6722 inst
.operands
[i
].reg
= val
;
6723 inst
.operands
[i
].isreg
= 1;
6724 inst
.operands
[i
].isvec
= 1;
6725 inst
.operands
[i
].issingle
= 1;
6726 inst
.operands
[i
].vectype
= optype
;
6727 inst
.operands
[i
].present
= 1;
6732 first_error (_("parse error"));
6736 /* Successfully parsed the operands. Update args. */
6742 first_error (_("expected comma"));
6746 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNDMQ,	/* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,	/* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNDQMQ,	/* Neon double, quad or MVE vector register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   ARM register.  */
  OP_RMQ,	/* MVE vector register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */
  OP_VRSDVLST,	/* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC,	/* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*			       0 .. 31 */
  OP_oI32b,	/*			       1 .. 32 */
  OP_oI32z,	/*			       0 .. 32 */
  OP_oIffffb,	/*			       0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oLR,	/* ARM LR register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQMQ,	/* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	/* Optional single, double or quad register or MVE vector
		   register.  */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6920 /* Generic instruction operand parser. This does no encoding and no
6921 semantic validation; it merely squirrels values away in the inst
6922 structure. Returns SUCCESS or FAIL depending on whether the
6923 specified grammar matched. */
6925 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6927 unsigned const int *upat
= pattern
;
6928 char *backtrack_pos
= 0;
6929 const char *backtrack_error
= 0;
6930 int i
, val
= 0, backtrack_index
= 0;
6931 enum arm_reg_type rtype
;
6932 parse_operand_result result
;
6933 unsigned int op_parse_code
;
6934 bfd_boolean partial_match
;
6936 #define po_char_or_fail(chr) \
6939 if (skip_past_char (&str, chr) == FAIL) \
6944 #define po_reg_or_fail(regtype) \
6947 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6948 & inst.operands[i].vectype); \
6951 first_error (_(reg_expected_msgs[regtype])); \
6954 inst.operands[i].reg = val; \
6955 inst.operands[i].isreg = 1; \
6956 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6957 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6958 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6959 || rtype == REG_TYPE_VFD \
6960 || rtype == REG_TYPE_NQ); \
6964 #define po_reg_or_goto(regtype, label) \
6967 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6968 & inst.operands[i].vectype); \
6972 inst.operands[i].reg = val; \
6973 inst.operands[i].isreg = 1; \
6974 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6975 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6976 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6977 || rtype == REG_TYPE_VFD \
6978 || rtype == REG_TYPE_NQ); \
6982 #define po_imm_or_fail(min, max, popt) \
6985 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6987 inst.operands[i].imm = val; \
6991 #define po_scalar_or_goto(elsz, label) \
6994 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6997 inst.operands[i].reg = val; \
6998 inst.operands[i].isscalar = 1; \
7002 #define po_misc_or_fail(expr) \
7010 #define po_misc_or_fail_no_backtrack(expr) \
7014 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7015 backtrack_pos = 0; \
7016 if (result != PARSE_OPERAND_SUCCESS) \
7021 #define po_barrier_or_imm(str) \
7024 val = parse_barrier (&str); \
7025 if (val == FAIL && ! ISALPHA (*str)) \
7028 /* ISB can only take SY as an option. */ \
7029 || ((inst.instruction & 0xf0) == 0x60 \
7032 inst.error = _("invalid barrier type"); \
7033 backtrack_pos = 0; \
7039 skip_whitespace (str
);
7041 for (i
= 0; upat
[i
] != OP_stop
; i
++)
7043 op_parse_code
= upat
[i
];
7044 if (op_parse_code
>= 1<<16)
7045 op_parse_code
= thumb
? (op_parse_code
>> 16)
7046 : (op_parse_code
& ((1<<16)-1));
7048 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
7050 /* Remember where we are in case we need to backtrack. */
7051 gas_assert (!backtrack_pos
);
7052 backtrack_pos
= str
;
7053 backtrack_error
= inst
.error
;
7054 backtrack_index
= i
;
7057 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
7058 po_char_or_fail (',');
7060 switch (op_parse_code
)
7072 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
7073 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
7074 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
7075 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
7076 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
7077 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
7080 po_reg_or_goto (REG_TYPE_RN
, try_rndmq
);
7084 po_reg_or_goto (REG_TYPE_MQ
, try_rnd
);
7087 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
7089 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
7091 /* Also accept generic coprocessor regs for unknown registers. */
7093 po_reg_or_fail (REG_TYPE_CN
);
7095 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
7096 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
7097 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
7098 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
7099 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
7100 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
7101 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
7102 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
7103 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
7104 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
7107 po_reg_or_goto (REG_TYPE_MQ
, try_nq
);
7110 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
7111 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
7114 po_reg_or_goto (REG_TYPE_MQ
, try_rndq
);
7118 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
7119 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
7121 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
7123 po_reg_or_goto (REG_TYPE_RN
, try_mq
);
7128 po_reg_or_goto (REG_TYPE_MQ
, try_nsdq2
);
7131 po_reg_or_fail (REG_TYPE_NSDQ
);
7135 po_reg_or_fail (REG_TYPE_MQ
);
7137 /* Neon scalar. Using an element size of 8 means that some invalid
7138 scalars are accepted here, so deal with those in later code. */
7139 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
7143 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
7146 po_imm_or_fail (0, 0, TRUE
);
7151 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
7156 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
7159 if (parse_ifimm_zero (&str
))
7160 inst
.operands
[i
].imm
= 0;
7164 = _("only floating point zero is allowed as immediate value");
7172 po_scalar_or_goto (8, try_rr
);
7175 po_reg_or_fail (REG_TYPE_RN
);
7179 case OP_RNSDQ_RNSC_MQ
:
7180 po_reg_or_goto (REG_TYPE_MQ
, try_rnsdq_rnsc
);
7185 po_scalar_or_goto (8, try_nsdq
);
7188 po_reg_or_fail (REG_TYPE_NSDQ
);
7194 po_scalar_or_goto (8, try_s_scalar
);
7197 po_scalar_or_goto (4, try_nsd
);
7200 po_reg_or_fail (REG_TYPE_NSD
);
7206 po_scalar_or_goto (8, try_ndq
);
7209 po_reg_or_fail (REG_TYPE_NDQ
);
7215 po_scalar_or_goto (8, try_vfd
);
7218 po_reg_or_fail (REG_TYPE_VFD
);
7223 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7224 not careful then bad things might happen. */
7225 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7230 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7233 /* There's a possibility of getting a 64-bit immediate here, so
7234 we need special handling. */
7235 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7238 inst
.error
= _("immediate value is out of range");
7246 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7249 po_imm_or_fail (0, 63, TRUE
);
7254 po_char_or_fail ('[');
7255 po_reg_or_fail (REG_TYPE_RN
);
7256 po_char_or_fail (']');
7262 po_reg_or_fail (REG_TYPE_RN
);
7263 if (skip_past_char (&str
, '!') == SUCCESS
)
7264 inst
.operands
[i
].writeback
= 1;
7268 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7269 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7270 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7271 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7272 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7273 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7274 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7275 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7276 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7277 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7278 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7279 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7281 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7283 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7284 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7286 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7287 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7288 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7289 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7291 /* Immediate variants */
7293 po_char_or_fail ('{');
7294 po_imm_or_fail (0, 255, TRUE
);
7295 po_char_or_fail ('}');
7299 /* The expression parser chokes on a trailing !, so we have
7300 to find it first and zap it. */
7303 while (*s
&& *s
!= ',')
7308 inst
.operands
[i
].writeback
= 1;
7310 po_imm_or_fail (0, 31, TRUE
);
7318 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7323 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7328 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7330 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7332 val
= parse_reloc (&str
);
7335 inst
.error
= _("unrecognized relocation suffix");
7338 else if (val
!= BFD_RELOC_UNUSED
)
7340 inst
.operands
[i
].imm
= val
;
7341 inst
.operands
[i
].hasreloc
= 1;
7347 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7349 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7351 inst
.operands
[i
].hasreloc
= 1;
7353 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7355 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7356 inst
.operands
[i
].hasreloc
= 0;
7360 /* Operand for MOVW or MOVT. */
7362 po_misc_or_fail (parse_half (&str
));
7365 /* Register or expression. */
7366 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7367 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7369 /* Register or immediate. */
7370 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7371 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7373 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7375 if (!is_immediate_prefix (*str
))
7378 val
= parse_fpa_immediate (&str
);
7381 /* FPA immediates are encoded as registers 8-15.
7382 parse_fpa_immediate has already applied the offset. */
7383 inst
.operands
[i
].reg
= val
;
7384 inst
.operands
[i
].isreg
= 1;
7387 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7388 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7390 /* Two kinds of register. */
7393 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7395 || (rege
->type
!= REG_TYPE_MMXWR
7396 && rege
->type
!= REG_TYPE_MMXWC
7397 && rege
->type
!= REG_TYPE_MMXWCG
))
7399 inst
.error
= _("iWMMXt data or control register expected");
7402 inst
.operands
[i
].reg
= rege
->number
;
7403 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7409 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7411 || (rege
->type
!= REG_TYPE_MMXWC
7412 && rege
->type
!= REG_TYPE_MMXWCG
))
7414 inst
.error
= _("iWMMXt control register expected");
7417 inst
.operands
[i
].reg
= rege
->number
;
7418 inst
.operands
[i
].isreg
= 1;
7423 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7424 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7425 case OP_oROR
: val
= parse_ror (&str
); break;
7426 case OP_COND
: val
= parse_cond (&str
); break;
7427 case OP_oBARRIER_I15
:
7428 po_barrier_or_imm (str
); break;
7430 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7436 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7437 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7439 inst
.error
= _("Banked registers are not available with this "
7445 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7449 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7452 val
= parse_sys_vldr_vstr (&str
);
7456 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7459 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7461 if (strncasecmp (str
, "APSR_", 5) == 0)
7468 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7469 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7470 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7471 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7472 default: found
= 16;
7476 inst
.operands
[i
].isvec
= 1;
7477 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7478 inst
.operands
[i
].reg
= REG_PC
;
7485 po_misc_or_fail (parse_tb (&str
));
7488 /* Register lists. */
7490 val
= parse_reg_list (&str
, REGLIST_RN
);
7493 inst
.operands
[i
].writeback
= 1;
7499 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7503 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7508 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7513 /* Allow Q registers too. */
7514 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7515 REGLIST_NEON_D
, &partial_match
);
7519 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7520 REGLIST_VFP_S
, &partial_match
);
7521 inst
.operands
[i
].issingle
= 1;
7526 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7527 REGLIST_VFP_D_VPR
, &partial_match
);
7528 if (val
== FAIL
&& !partial_match
)
7531 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7532 REGLIST_VFP_S_VPR
, &partial_match
);
7533 inst
.operands
[i
].issingle
= 1;
7538 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7539 REGLIST_NEON_D
, &partial_match
);
7544 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7545 1, &inst
.operands
[i
].vectype
);
7546 if (val
!= (((op_parse_code
== OP_MSTRLST2
) ? 3 : 7) << 5 | 0xe))
7550 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7551 0, &inst
.operands
[i
].vectype
);
7554 /* Addressing modes */
7556 po_misc_or_fail (parse_address_group_reloc (&str
, i
, GROUP_MVE
));
7560 po_misc_or_fail (parse_address (&str
, i
));
7564 po_misc_or_fail_no_backtrack (
7565 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7569 po_misc_or_fail_no_backtrack (
7570 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7574 po_misc_or_fail_no_backtrack (
7575 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7579 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7583 po_misc_or_fail_no_backtrack (
7584 parse_shifter_operand_group_reloc (&str
, i
));
7588 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7592 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7596 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7600 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7603 /* Various value-based sanity checks and shared operations. We
7604 do not signal immediate failures for the register constraints;
7605 this allows a syntax error to take precedence. */
7606 switch (op_parse_code
)
7614 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7615 inst
.error
= BAD_PC
;
7620 if (inst
.operands
[i
].isreg
)
7622 if (inst
.operands
[i
].reg
== REG_PC
)
7623 inst
.error
= BAD_PC
;
7624 else if (inst
.operands
[i
].reg
== REG_SP
7625 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7626 relaxed since ARMv8-A. */
7627 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7630 inst
.error
= BAD_SP
;
7636 if (inst
.operands
[i
].isreg
7637 && inst
.operands
[i
].reg
== REG_PC
7638 && (inst
.operands
[i
].writeback
|| thumb
))
7639 inst
.error
= BAD_PC
;
7643 if (inst
.operands
[i
].isreg
)
7652 case OP_oBARRIER_I15
:
7665 inst
.operands
[i
].imm
= val
;
7670 if (inst
.operands
[i
].reg
!= REG_LR
)
7671 inst
.error
= _("operand must be LR register");
7675 if (inst
.operands
[i
].isreg
7676 && (inst
.operands
[i
].reg
& 0x00000001) != 0)
7677 inst
.error
= BAD_ODD
;
7681 if (inst
.operands
[i
].isreg
)
7683 if ((inst
.operands
[i
].reg
& 0x00000001) != 1)
7684 inst
.error
= BAD_EVEN
;
7685 else if (inst
.operands
[i
].reg
== REG_SP
)
7686 as_tsktsk (MVE_BAD_SP
);
7687 else if (inst
.operands
[i
].reg
== REG_PC
)
7688 inst
.error
= BAD_PC
;
7696 /* If we get here, this operand was successfully parsed. */
7697 inst
.operands
[i
].present
= 1;
7701 inst
.error
= BAD_ARGS
;
7706 /* The parse routine should already have set inst.error, but set a
7707 default here just in case. */
7709 inst
.error
= BAD_SYNTAX
;
7713 /* Do not backtrack over a trailing optional argument that
7714 absorbed some text. We will only fail again, with the
7715 'garbage following instruction' error message, which is
7716 probably less helpful than the current one. */
7717 if (backtrack_index
== i
&& backtrack_pos
!= str
7718 && upat
[i
+1] == OP_stop
)
7721 inst
.error
= BAD_SYNTAX
;
7725 /* Try again, skipping the optional argument at backtrack_pos. */
7726 str
= backtrack_pos
;
7727 inst
.error
= backtrack_error
;
7728 inst
.operands
[backtrack_index
].present
= 0;
7729 i
= backtrack_index
;
7733 /* Check that we have parsed all the arguments. */
7734 if (*str
!= '\0' && !inst
.error
)
7735 inst
.error
= _("garbage following instruction");
7737 return inst
.error
? FAIL
: SUCCESS
;
7740 #undef po_char_or_fail
7741 #undef po_reg_or_fail
7742 #undef po_reg_or_goto
7743 #undef po_imm_or_fail
7744 #undef po_scalar_or_fail
7745 #undef po_barrier_or_imm
7747 /* Shorthand macro for instruction encoding functions issuing errors. */
7748 #define constraint(expr, err) \
7759 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7760 instructions are unpredictable if these registers are used. This
7761 is the BadReg predicate in ARM's Thumb-2 documentation.
7763 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7764 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7765 #define reject_bad_reg(reg) \
7767 if (reg == REG_PC) \
7769 inst.error = BAD_PC; \
7772 else if (reg == REG_SP \
7773 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7775 inst.error = BAD_SP; \
7780 /* If REG is R13 (the stack pointer), warn that its use is
7782 #define warn_deprecated_sp(reg) \
7784 if (warn_on_deprecated && reg == REG_SP) \
7785 as_tsktsk (_("use of r13 is deprecated")); \
7788 /* Functions for operand encoding. ARM, then Thumb. */
7790 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7792 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7794 The only binary encoding difference is the Coprocessor number. Coprocessor
7795 9 is used for half-precision calculations or conversions. The format of the
7796 instruction is the same as the equivalent Coprocessor 10 instruction that
7797 exists for Single-Precision operation. */
/* NOTE(review): this region was damaged in extraction -- statements are
   split across lines and some original lines are missing.  Code tokens
   below are preserved verbatim; only comments have been added.  */
/* Re-encode the current scalar VFP instruction as its ARMv8.2-A FP16
   variant by switching the coprocessor number field (bits [11:8]) from
   10 (single precision) to 9 (half precision).  */
7800 do_scalar_fp16_v82_encode (void)
/* Conditional execution of scalar fp16 is architecturally UNPREDICTABLE;
   warn but still assemble.  */
7802 if (inst
.cond
< COND_ALWAYS
)
7803 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7804 " the behaviour is UNPREDICTABLE"));
/* The selected CPU must actually provide the FP16 extension.  */
7805 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
/* Clear bits [11:8] of the opcode and substitute coprocessor 9.  */
7808 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7809 mark_feature_used (&arm_ext_fp16
);
7812 /* If VAL can be encoded in the immediate field of an ARM instruction,
7813 return the encoded form. Otherwise, return FAIL. */
/* NOTE(review): extraction dropped some lines of this function (the
   trivial val <= 0xff fast path and the trailing FAIL return are not
   visible); remaining tokens kept verbatim, comments only added.  */
/* Try to express VAL as an ARM data-processing modified immediate: an
   8-bit constant rotated right by an even amount.  Returns the 12-bit
   encoding (rotation in bits [11:8], constant in bits [7:0]) on
   success.  */
7816 encode_arm_immediate (unsigned int val
)
/* Rotating VAL *left* by i finds the constant the hardware would
   recover by rotating *right* by i; only even rotations exist.  */
7823 for (i
= 2; i
< 32; i
+= 2)
7824 if ((a
= rotate_left (val
, i
)) <= 0xff)
7825 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7830 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7831 return the encoded form. Otherwise, return FAIL. */
/* NOTE(review): several original lines are missing here (the val <= 0xff
   case and the returns for the replicated-byte forms); tokens kept
   verbatim, comments only added.  */
/* Try to express VAL as a Thumb-2 (T32) modified immediate.  T32 allows
   an 8-bit value rotated into any position, 0x00XY00XY, 0xXY00XY00 and
   0xXYXYXYXY replication patterns.  */
7833 encode_thumb32_immediate (unsigned int val
)
/* Single 0xff-sized field anywhere in the word: shifted-immediate form.  */
7840 for (i
= 1; i
<= 24; i
++)
7843 if ((val
& ~(0xff << i
)) == 0)
7844 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
/* Replicated halfword / replicated byte patterns.  */
7848 if (val
== ((a
<< 16) | a
))
7850 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7854 if (val
== ((a
<< 16) | a
))
7855 return 0x200 | (a
>> 8);
7859 /* Encode a VFP SP or DP register number into inst.instruction. */
/* NOTE(review): the switch skeleton of this function was lost in
   extraction; only the per-position OR statements remain.  Tokens kept
   verbatim, comments only added.  */
/* Encode VFP register number REG into inst.instruction at position POS.
   S registers split into a 4-bit field plus one low bit; D registers
   split into a 4-bit field plus one high bit (D16-D31 need VFPv3/d32).  */
7862 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
/* A D register above D15 is only legal with the 32-D-register
   extension; record the feature use or report an error.  */
7864 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7867 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7870 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7873 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7878 first_error (_("D register out of range for selected VFP version"));
/* S register: bits [4:1] into the field, bit 0 into the "extra" bit
   (Sd, Sn, Sm positions respectively).  */
7886 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7890 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7894 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
/* D register: bits [3:0] into the field, bit 4 into the "extra" bit
   (Dd, Dn, Dm positions respectively).  */
7898 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7902 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7906 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7914 /* Encode a <shift> in an ARM-format instruction. The immediate,
7915 if any, is handled by md_apply_fix. */
/* NOTE(review): some structural lines (braces, else arms) were lost in
   extraction; tokens kept verbatim, comments only added.  */
/* Encode the <shift> part of operand I into an ARM-format instruction.
   Handles RRX, immediate shifts (immediate fixed up later via
   BFD_RELOC_ARM_SHIFT_IMM) and register-specified shifts.  */
7917 encode_arm_shift (int i
)
7919 /* register-shifted register. */
/* Using r15 anywhere in a register-shifted-register operand is
   UNPREDICTABLE; scan all operands up to and including I.  */
7920 if (inst
.operands
[i
].immisreg
)
7923 for (op_index
= 0; op_index
<= i
; ++op_index
)
7925 /* Check the operand only when it's presented. In pre-UAL syntax,
7926 if the destination register is the same as the first operand, two
7927 register form of the instruction can be used. */
7928 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7929 && inst
.operands
[op_index
].reg
== REG_PC
)
7930 as_warn (UNPRED_REG ("r15"));
/* The shift-amount register itself must not be r15 either.  */
7933 if (inst
.operands
[i
].imm
== REG_PC
)
7934 as_warn (UNPRED_REG ("r15"));
/* RRX assembles as ROR with a zero shift amount.  */
7937 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7938 inst
.instruction
|= SHIFT_ROR
<< 5;
7941 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7942 if (inst
.operands
[i
].immisreg
)
/* Register-specified shift: set the by-register bit and place the
   shift register number in bits [11:8].  */
7944 inst
.instruction
|= SHIFT_BY_REG
;
7945 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
/* Immediate shift amount is applied by md_apply_fix via this reloc.  */
7948 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
/* NOTE(review): braces/else lines lost in extraction; tokens kept
   verbatim, comments only added.  */
/* Encode operand I as an ARM "shifter operand": either a (possibly
   shifted) register, or a modified immediate.  */
7953 encode_arm_shifter_operand (int i
)
7955 if (inst
.operands
[i
].isreg
)
/* Register form: Rm in bits [3:0], then the shift encoding.  */
7957 inst
.instruction
|= inst
.operands
[i
].reg
;
7958 encode_arm_shift (i
);
/* Immediate form: set the I bit; unless a reloc will supply the value
   later, OR in the already-encoded immediate.  */
7962 inst
.instruction
|= INST_IMMEDIATE
;
7963 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7964 inst
.instruction
|= inst
.operands
[i
].imm
;
7968 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
/* NOTE(review): braces and a few lines lost in extraction; tokens kept
   verbatim, comments only added.  */
/* Common encoding for ARM addressing modes 2 and 3: base register,
   pre/post-index and write-back bits, plus a write-back/base-overlap
   warning.  */
7970 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7973 Generate an error if the operand is not a register. */
7974 constraint (!inst
.operands
[i
].isreg
,
7975 _("Instruction does not support =N addresses"));
/* Base register Rn goes in bits [19:16].  */
7977 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7979 if (inst
.operands
[i
].preind
)
/* T-form (ldrt/strt) instructions are always post-indexed.  */
7983 inst
.error
= _("instruction does not accept preindexed addressing");
7986 inst
.instruction
|= PRE_INDEX
;
7987 if (inst
.operands
[i
].writeback
)
7988 inst
.instruction
|= WRITE_BACK
;
7991 else if (inst
.operands
[i
].postind
)
/* Post-indexed addressing always writes back the base.  */
7993 gas_assert (inst
.operands
[i
].writeback
);
7995 inst
.instruction
|= WRITE_BACK
;
7997 else /* unindexed - only for coprocessor */
7999 inst
.error
= _("instruction does not accept unindexed addressing");
/* Warn when the transfer register (bits [15:12]) equals a base that
   will be written back -- UNPREDICTABLE/deprecated territory.  */
8003 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
8004 && (((inst
.instruction
& 0x000f0000) >> 16)
8005 == ((inst
.instruction
& 0x0000f000) >> 12)))
8006 as_warn ((inst
.instruction
& LOAD_BIT
)
8007 ? _("destination register same as write-back base")
8008 : _("source register same as write-back base"));
8011 /* inst.operands[i] was set up by parse_address. Encode it into an
8012 ARM-format mode 2 load or store instruction. If is_t is true,
8013 reject forms that cannot be used with a T instruction (i.e. not
/* NOTE(review): braces and several constraint-message lines were lost
   in extraction; tokens kept verbatim, comments only added.  */
/* Encode operand I (set up by parse_address) as an ARM mode-2 load or
   store address: register offset (optionally shifted) or immediate
   offset resolved through inst.relocs[0].  IS_T rejects forms illegal
   for ldrt/strt.  */
8016 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
8018 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8020 encode_arm_addr_mode_common (i
, is_t
);
8022 if (inst
.operands
[i
].immisreg
)
/* Register offset: Rm may not be PC, and a PC base may not write
   back.  */
8024 constraint ((inst
.operands
[i
].imm
== REG_PC
8025 || (is_pc
&& inst
.operands
[i
].writeback
)),
/* In mode 2 the I bit set means *register* offset.  */
8027 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
8028 inst
.instruction
|= inst
.operands
[i
].imm
;
/* U bit: add (1) unless the offset was written negative.  */
8029 if (!inst
.operands
[i
].negative
)
8030 inst
.instruction
|= INDEX_UP
;
8031 if (inst
.operands
[i
].shifted
)
/* RRX encodes as ROR #0; other shifts carry an immediate fixed up
   later via BFD_RELOC_ARM_SHIFT_IMM.  */
8033 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
8034 inst
.instruction
|= SHIFT_ROR
<< 5;
8037 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
8038 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
8042 else /* immediate offset in inst.relocs[0] */
8044 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
8046 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
8048 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
8049 cannot use PC in addressing.
8050 PC cannot be used in writeback addressing, either. */
8051 constraint ((is_t
|| inst
.operands
[i
].writeback
),
8054 /* Use of PC in str is deprecated for ARMv7. */
8055 if (warn_on_deprecated
8057 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
8058 as_tsktsk (_("use of PC in this instruction is deprecated"));
/* If no reloc was chosen yet, default to a plain 12-bit offset.  */
8061 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8063 /* Prefer + for zero encoded value. */
8064 if (!inst
.operands
[i
].negative
)
8065 inst
.instruction
|= INDEX_UP
;
8066 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
8071 /* inst.operands[i] was set up by parse_address. Encode it into an
8072 ARM-format mode 3 load or store instruction. Reject forms that
8073 cannot be used with such instructions. If is_t is true, reject
8074 forms that cannot be used with a T instruction (i.e. not
/* NOTE(review): braces, an early return and the constraint-message
   lines were lost in extraction; tokens kept verbatim, comments only
   added.  */
/* Encode operand I as an ARM mode-3 (halfword/signed byte/doubleword)
   load or store address.  Mode 3 has no scaled register index; its
   immediate is split 8-bit (BFD_RELOC_ARM_OFFSET_IMM8).  IS_T rejects
   forms illegal for the T variants.  */
8077 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
/* Mode 3 cannot scale a register offset at all.  */
8079 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
8081 inst
.error
= _("instruction does not accept scaled register index");
8085 encode_arm_addr_mode_common (i
, is_t
);
8087 if (inst
.operands
[i
].immisreg
)
/* Register offset: Rm may not be PC; T-forms may not use a PC base;
   a PC base may not write back.  */
8089 constraint ((inst
.operands
[i
].imm
== REG_PC
8090 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
8092 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
8094 inst
.instruction
|= inst
.operands
[i
].imm
;
/* U bit: add unless the offset was written negative.  */
8095 if (!inst
.operands
[i
].negative
)
8096 inst
.instruction
|= INDEX_UP
;
8098 else /* immediate offset in inst.relocs[0] */
8100 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
8101 && inst
.operands
[i
].writeback
),
/* Mark the immediate form; the split 8-bit offset is applied by
   md_apply_fix through the reloc below.  */
8103 inst
.instruction
|= HWOFFSET_IMM
;
8104 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8106 /* Prefer + for zero encoded value. */
8107 if (!inst
.operands
[i
].negative
)
8108 inst
.instruction
|= INDEX_UP
;
8110 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
8115 /* Write immediate bits [7:0] to the following locations:
8117 |28/24|23 19|18 16|15 4|3 0|
8118 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8120 This function is used by VMOV/VMVN/VORR/VBIC. */
/* NOTE(review): statement-splitting damage from extraction; tokens kept
   verbatim, comments only added.  */
/* Scatter the 8-bit Neon immediate IMMBITS into inst.instruction:
   d,e,f,g,h -> bits [3:0] plus [18:16]; the top bit goes to bit 28 in
   Thumb encodings and bit 24 in ARM encodings.  */
8123 neon_write_immbits (unsigned immbits
)
8125 inst
.instruction
|= immbits
& 0xf;
8126 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
/* Bit 7 lands in a different place depending on the instruction set.  */
8127 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
8130 /* Invert low-order SIZE bits of XHI:XLO. */
/* NOTE(review): the switch/case skeleton and the write-back of the
   results were lost in extraction; tokens kept verbatim, comments only
   added.  */
/* Invert the low-order SIZE bits of the pair XHI:XLO (either pointer
   may be NULL, treated as 0).  Used to try the complemented immediate
   for VMVN/VBIC vs VMOV/VORR.  */
8133 neon_invert_size (unsigned *xlo
, unsigned *xhi
, int size
)
8135 unsigned immlo
= xlo
? *xlo
: 0;
8136 unsigned immhi
= xhi
? *xhi
: 0;
/* Per-size inversion masks: 8, 16, 32 and (with immhi) 64 bits.  */
8141 immlo
= (~immlo
) & 0xff;
8145 immlo
= (~immlo
) & 0xffff;
8149 immhi
= (~immhi
) & 0xffffffff;
8153 immlo
= (~immlo
) & 0xffffffff;
8167 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
/* True if IMM has the form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD, i.e.
   every byte of IMM is either all-zeros or all-ones.  */
static int
neon_bits_same_in_bytes (unsigned imm)
{
  unsigned byte_idx;

  for (byte_idx = 0; byte_idx < 4; byte_idx++)
    {
      unsigned mask = 0xffu << (byte_idx * 8);
      unsigned field = imm & mask;

      /* A byte that is neither 0x00 nor 0xff disqualifies IMM.  */
      if (field != 0 && field != mask)
	return 0;
    }
  return 1;
}
8179 /* For immediate of above form, return 0bABCD. */
/* For an immediate of the form accepted by neon_bits_same_in_bytes,
   collapse each byte to its low bit, producing 0bABCD (A = MSByte).  */
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned squashed = 0;

  if (imm & 0x01000000)
    squashed |= 8;
  if (imm & 0x00010000)
    squashed |= 4;
  if (imm & 0x00000100)
    squashed |= 2;
  if (imm & 0x00000001)
    squashed |= 1;
  return squashed;
}
8188 /* Compress quarter-float representation to 0b...000 abcdefgh. */
/* Compress a single-precision "quarter float" bit pattern IMM to the
   8-bit 0b...abcdefgh Neon immediate form: sign plus the top 7 bits of
   exponent/mantissa that survive the quarter-precision restriction.  */
static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low_bits = (imm >> 19) & 0x7f;
  unsigned sign_bit = (imm >> 24) & 0x80;

  return low_bits | sign_bit;
}
8196 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8197 the instruction. *OP is passed as the initial value of the op field, and
8198 may be set to a different value depending on the constant (i.e.
8199 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8200 MVN). If the immediate looks like a repeated pattern then also
8201 try smaller element sizes. */
/* NOTE(review): extraction dropped most of the structure of this
   function (the returned CMODE constants, FAIL paths, braces and the
   per-size switch); tokens kept verbatim, comments only added.  */
/* Pick a Neon CMODE encoding for the 64-bit immediate IMMHI:IMMLO.
   *IMMBITS receives the 8-bit payload, *OP may be flipped (MOV vs MVN
   trickery), SIZE is the starting element size and TYPE the operand
   element type.  Smaller element sizes are tried when the value looks
   like a repeated pattern.  */
8204 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
8205 unsigned *immbits
, int *op
, int size
,
8206 enum neon_el_type type
)
8208 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
8210 if (type
== NT_float
&& !float_p
)
/* Quarter-precision float immediate (F32 elements only, MOV only).  */
8213 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
8215 if (size
!= 32 || *op
== 1)
8217 *immbits
= neon_qfloat_bits (immlo
);
/* I64: every byte all-zeros or all-ones -> squash to one bit each.  */
8223 if (neon_bits_same_in_bytes (immhi
)
8224 && neon_bits_same_in_bytes (immlo
))
8228 *immbits
= (neon_squash_bits (immhi
) << 4)
8229 | neon_squash_bits (immlo
);
/* I32: 8-bit value in any byte lane, or the 0x..ff / 0x..ffff
   "shift-ones" forms.  */
8240 if (immlo
== (immlo
& 0x000000ff))
8245 else if (immlo
== (immlo
& 0x0000ff00))
8247 *immbits
= immlo
>> 8;
8250 else if (immlo
== (immlo
& 0x00ff0000))
8252 *immbits
= immlo
>> 16;
8255 else if (immlo
== (immlo
& 0xff000000))
8257 *immbits
= immlo
>> 24;
8260 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8262 *immbits
= (immlo
>> 8) & 0xff;
8265 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8267 *immbits
= (immlo
>> 16) & 0xff;
/* I16: both halfwords must match before trying 16-bit forms.  */
8271 if ((immlo
& 0xffff) != (immlo
>> 16))
8278 if (immlo
== (immlo
& 0x000000ff))
8283 else if (immlo
== (immlo
& 0x0000ff00))
8285 *immbits
= immlo
>> 8;
/* I8: all bytes must match.  */
8289 if ((immlo
& 0xff) != (immlo
>> 8))
8294 if (immlo
== (immlo
& 0x000000ff))
8296 /* Don't allow MVN with 8-bit immediate. */
8306 #if defined BFD_HOST_64_BIT
8307 /* Returns TRUE if double precision value V may be cast
8308 to single precision without loss of accuracy. */
8311 is_double_a_single (bfd_int64_t v
)
8313 int exp
= (int)((v
>> 52) & 0x7FF);
8314 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8316 return (exp
== 0 || exp
== 0x7FF
8317 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8318 && (mantissa
& 0x1FFFFFFFl
) == 0;
8321 /* Returns a double precision value casted to single precision
8322 (ignoring the least significant bits in exponent and mantissa). */
/* NOTE(review): extraction dropped this function's branch structure
   (Inf/NaN handling, overflow-to-infinity, the denormal flush and the
   mantissa >>= 29 narrowing); tokens kept verbatim, comments only
   added.  */
/* Convert an IEEE binary64 bit pattern V to the corresponding binary32
   bit pattern, discarding low-order exponent/mantissa bits.  */
8325 double_to_single (bfd_int64_t v
)
8327 int sign
= (int) ((v
>> 63) & 1l);
8328 int exp
= (int) ((v
>> 52) & 0x7FF);
8329 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
/* Re-bias the exponent from binary64 (1023) to binary32 (127).  */
8335 exp
= exp
- 1023 + 127;
8344 /* No denormalized numbers. */
/* Assemble sign / 8-bit exponent / 23-bit mantissa.  */
8350 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8352 #endif /* BFD_HOST_64_BIT */
8361 static void do_vfp_nsyn_opcode (const char *);
8363 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8364 Determine whether it can be performed with a move instruction; if
8365 it can, convert inst.instruction to that move instruction and
8366 return TRUE; if it can't, convert inst.instruction to a literal-pool
8367 load and return FALSE. If this is not a valid thing to do in the
8368 current context, set inst.error and return TRUE.
8370 inst.operands[i] describes the destination register. */
8373 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8376 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8377 bfd_boolean arm_p
= (t
== CONST_ARM
);
8380 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8384 if ((inst
.instruction
& tbit
) == 0)
8386 inst
.error
= _("invalid pseudo operation");
8390 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8391 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8392 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8394 inst
.error
= _("constant expression expected");
8398 if (inst
.relocs
[0].exp
.X_op
== O_constant
8399 || inst
.relocs
[0].exp
.X_op
== O_big
)
8401 #if defined BFD_HOST_64_BIT
8406 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8408 LITTLENUM_TYPE w
[X_PRECISION
];
8411 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8413 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8415 /* FIXME: Should we check words w[2..5] ? */
8420 #if defined BFD_HOST_64_BIT
8422 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8423 << LITTLENUM_NUMBER_OF_BITS
)
8424 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8425 << LITTLENUM_NUMBER_OF_BITS
)
8426 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8427 << LITTLENUM_NUMBER_OF_BITS
)
8428 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8430 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8431 | (l
[0] & LITTLENUM_MASK
);
8435 v
= inst
.relocs
[0].exp
.X_add_number
;
8437 if (!inst
.operands
[i
].issingle
)
8441 /* LDR should not use lead in a flag-setting instruction being
8442 chosen so we do not check whether movs can be used. */
8444 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8445 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8446 && inst
.operands
[i
].reg
!= 13
8447 && inst
.operands
[i
].reg
!= 15)
8449 /* Check if on thumb2 it can be done with a mov.w, mvn or
8450 movw instruction. */
8451 unsigned int newimm
;
8452 bfd_boolean isNegated
;
8454 newimm
= encode_thumb32_immediate (v
);
8455 if (newimm
!= (unsigned int) FAIL
)
8459 newimm
= encode_thumb32_immediate (~v
);
8460 if (newimm
!= (unsigned int) FAIL
)
8464 /* The number can be loaded with a mov.w or mvn
8466 if (newimm
!= (unsigned int) FAIL
8467 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8469 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8470 | (inst
.operands
[i
].reg
<< 8));
8471 /* Change to MOVN. */
8472 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8473 inst
.instruction
|= (newimm
& 0x800) << 15;
8474 inst
.instruction
|= (newimm
& 0x700) << 4;
8475 inst
.instruction
|= (newimm
& 0x0ff);
8478 /* The number can be loaded with a movw instruction. */
8479 else if ((v
& ~0xFFFF) == 0
8480 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8482 int imm
= v
& 0xFFFF;
8484 inst
.instruction
= 0xf2400000; /* MOVW. */
8485 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8486 inst
.instruction
|= (imm
& 0xf000) << 4;
8487 inst
.instruction
|= (imm
& 0x0800) << 15;
8488 inst
.instruction
|= (imm
& 0x0700) << 4;
8489 inst
.instruction
|= (imm
& 0x00ff);
8496 int value
= encode_arm_immediate (v
);
8500 /* This can be done with a mov instruction. */
8501 inst
.instruction
&= LITERAL_MASK
;
8502 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8503 inst
.instruction
|= value
& 0xfff;
8507 value
= encode_arm_immediate (~ v
);
8510 /* This can be done with a mvn instruction. */
8511 inst
.instruction
&= LITERAL_MASK
;
8512 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8513 inst
.instruction
|= value
& 0xfff;
8517 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8520 unsigned immbits
= 0;
8521 unsigned immlo
= inst
.operands
[1].imm
;
8522 unsigned immhi
= inst
.operands
[1].regisimm
8523 ? inst
.operands
[1].reg
8524 : inst
.relocs
[0].exp
.X_unsigned
8526 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8527 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8528 &op
, 64, NT_invtype
);
8532 neon_invert_size (&immlo
, &immhi
, 64);
8534 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8535 &op
, 64, NT_invtype
);
8540 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8546 /* Fill other bits in vmov encoding for both thumb and arm. */
8548 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8550 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8551 neon_write_immbits (immbits
);
8559 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8560 if (inst
.operands
[i
].issingle
8561 && is_quarter_float (inst
.operands
[1].imm
)
8562 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8564 inst
.operands
[1].imm
=
8565 neon_qfloat_bits (v
);
8566 do_vfp_nsyn_opcode ("fconsts");
8570 /* If our host does not support a 64-bit type then we cannot perform
8571 the following optimization. This mean that there will be a
8572 discrepancy between the output produced by an assembler built for
8573 a 32-bit-only host and the output produced from a 64-bit host, but
8574 this cannot be helped. */
8575 #if defined BFD_HOST_64_BIT
8576 else if (!inst
.operands
[1].issingle
8577 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8579 if (is_double_a_single (v
)
8580 && is_quarter_float (double_to_single (v
)))
8582 inst
.operands
[1].imm
=
8583 neon_qfloat_bits (double_to_single (v
));
8584 do_vfp_nsyn_opcode ("fconstd");
8592 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8593 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8596 inst
.operands
[1].reg
= REG_PC
;
8597 inst
.operands
[1].isreg
= 1;
8598 inst
.operands
[1].preind
= 1;
8599 inst
.relocs
[0].pc_rel
= 1;
8600 inst
.relocs
[0].type
= (thumb_p
8601 ? BFD_RELOC_ARM_THUMB_OFFSET
8603 ? BFD_RELOC_ARM_HWLITERAL
8604 : BFD_RELOC_ARM_LITERAL
));
8608 /* inst.operands[i] was set up by parse_address. Encode it into an
8609 ARM-format instruction. Reject all forms which cannot be encoded
8610 into a coprocessor load/store instruction. If wb_ok is false,
8611 reject use of writeback; if unind_ok is false, reject use of
8612 unindexed addressing. If reloc_override is not 0, use it instead
8613 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8614 (in which case it is preserved). */
8617 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8619 if (!inst
.operands
[i
].isreg
)
8622 if (! inst
.operands
[0].isvec
)
8624 inst
.error
= _("invalid co-processor operand");
8627 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8631 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8633 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8635 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8637 gas_assert (!inst
.operands
[i
].writeback
);
8640 inst
.error
= _("instruction does not support unindexed addressing");
8643 inst
.instruction
|= inst
.operands
[i
].imm
;
8644 inst
.instruction
|= INDEX_UP
;
8648 if (inst
.operands
[i
].preind
)
8649 inst
.instruction
|= PRE_INDEX
;
8651 if (inst
.operands
[i
].writeback
)
8653 if (inst
.operands
[i
].reg
== REG_PC
)
8655 inst
.error
= _("pc may not be used with write-back");
8660 inst
.error
= _("instruction does not support writeback");
8663 inst
.instruction
|= WRITE_BACK
;
8667 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8668 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8669 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8670 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8673 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8675 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8678 /* Prefer + for zero encoded value. */
8679 if (!inst
.operands
[i
].negative
)
8680 inst
.instruction
|= INDEX_UP
;
8685 /* Functions for instruction encoding, sorted by sub-architecture.
8686 First some generics; their names are taken from the conventional
8687 bit positions for register arguments in ARM format instructions. */
8697 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8703 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8709 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8710 inst
.instruction
|= inst
.operands
[1].reg
;
8716 inst
.instruction
|= inst
.operands
[0].reg
;
8717 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8723 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8724 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8730 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8731 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8737 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8738 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8742 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8744 if (ARM_CPU_IS_ANY (cpu_variant
))
8746 as_tsktsk ("%s", msg
);
8749 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8761 unsigned Rn
= inst
.operands
[2].reg
;
8762 /* Enforce restrictions on SWP instruction. */
8763 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8765 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8766 _("Rn must not overlap other operands"));
8768 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8770 if (!check_obsolete (&arm_ext_v8
,
8771 _("swp{b} use is obsoleted for ARMv8 and later"))
8772 && warn_on_deprecated
8773 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8774 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8777 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8778 inst
.instruction
|= inst
.operands
[1].reg
;
8779 inst
.instruction
|= Rn
<< 16;
8785 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8786 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8787 inst
.instruction
|= inst
.operands
[2].reg
;
8793 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8794 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8795 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8796 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8798 inst
.instruction
|= inst
.operands
[0].reg
;
8799 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8800 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8806 inst
.instruction
|= inst
.operands
[0].imm
;
8812 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8813 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8816 /* ARM instructions, in alphabetical order by function name (except
8817 that wrapper functions appear immediately after the function they
8820 /* This is a pseudo-op of the form "adr rd, label" to be converted
8821 into a relative address of the form "add rd, pc, #label-.-8". */
8826 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8828 /* Frag hacking will turn this into a sub instruction if the offset turns
8829 out to be negative. */
8830 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8831 inst
.relocs
[0].pc_rel
= 1;
8832 inst
.relocs
[0].exp
.X_add_number
-= 8;
8834 if (support_interwork
8835 && inst
.relocs
[0].exp
.X_op
== O_symbol
8836 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8837 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8838 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8839 inst
.relocs
[0].exp
.X_add_number
|= 1;
8842 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8843 into a relative address of the form:
8844 add rd, pc, #low(label-.-8)"
8845 add rd, rd, #high(label-.-8)" */
8850 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8852 /* Frag hacking will turn this into a sub instruction if the offset turns
8853 out to be negative. */
8854 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8855 inst
.relocs
[0].pc_rel
= 1;
8856 inst
.size
= INSN_SIZE
* 2;
8857 inst
.relocs
[0].exp
.X_add_number
-= 8;
8859 if (support_interwork
8860 && inst
.relocs
[0].exp
.X_op
== O_symbol
8861 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8862 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8863 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8864 inst
.relocs
[0].exp
.X_add_number
|= 1;
8870 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8871 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8873 if (!inst
.operands
[1].present
)
8874 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8875 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8876 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8877 encode_arm_shifter_operand (2);
8883 if (inst
.operands
[0].present
)
8884 inst
.instruction
|= inst
.operands
[0].imm
;
8886 inst
.instruction
|= 0xf;
8892 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8893 constraint (msb
> 32, _("bit-field extends past end of register"));
8894 /* The instruction encoding stores the LSB and MSB,
8895 not the LSB and width. */
8896 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8897 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8898 inst
.instruction
|= (msb
- 1) << 16;
8906 /* #0 in second position is alternative syntax for bfc, which is
8907 the same instruction but with REG_PC in the Rm field. */
8908 if (!inst
.operands
[1].isreg
)
8909 inst
.operands
[1].reg
= REG_PC
;
8911 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8912 constraint (msb
> 32, _("bit-field extends past end of register"));
8913 /* The instruction encoding stores the LSB and MSB,
8914 not the LSB and width. */
8915 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8916 inst
.instruction
|= inst
.operands
[1].reg
;
8917 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8918 inst
.instruction
|= (msb
- 1) << 16;
8924 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8925 _("bit-field extends past end of register"));
8926 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8927 inst
.instruction
|= inst
.operands
[1].reg
;
8928 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8929 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8932 /* ARM V5 breakpoint instruction (argument parse)
8933 BKPT <16 bit unsigned immediate>
8934 Instruction is not conditional.
8935 The bit pattern given in insns[] has the COND_ALWAYS condition,
8936 and it is an error if the caller tried to override that. */
8941 /* Top 12 of 16 bits to bits 19:8. */
8942 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8944 /* Bottom 4 of 16 bits to bits 3:0. */
8945 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8949 encode_branch (int default_reloc
)
8951 if (inst
.operands
[0].hasreloc
)
8953 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8954 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8955 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8956 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8957 ? BFD_RELOC_ARM_PLT32
8958 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8961 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8962 inst
.relocs
[0].pc_rel
= 1;
8969 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8970 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8973 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8980 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8982 if (inst
.cond
== COND_ALWAYS
)
8983 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8985 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8989 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8992 /* ARM V5 branch-link-exchange instruction (argument parse)
8993 BLX <target_addr> ie BLX(1)
8994 BLX{<condition>} <Rm> ie BLX(2)
8995 Unfortunately, there are two different opcodes for this mnemonic.
8996 So, the insns[].value is not used, and the code here zaps values
8997 into inst.instruction.
8998 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9003 if (inst
.operands
[0].isreg
)
9005 /* Arg is a register; the opcode provided by insns[] is correct.
9006 It is not illegal to do "blx pc", just useless. */
9007 if (inst
.operands
[0].reg
== REG_PC
)
9008 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
9010 inst
.instruction
|= inst
.operands
[0].reg
;
9014 /* Arg is an address; this instruction cannot be executed
9015 conditionally, and the opcode must be adjusted.
9016 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
9017 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
9018 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9019 inst
.instruction
= 0xfa000000;
9020 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
9027 bfd_boolean want_reloc
;
9029 if (inst
.operands
[0].reg
== REG_PC
)
9030 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
9032 inst
.instruction
|= inst
.operands
[0].reg
;
9033 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
9034 it is for ARMv4t or earlier. */
9035 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
9036 if (!ARM_FEATURE_ZERO (selected_object_arch
)
9037 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
9041 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
9046 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
9050 /* ARM v5TEJ. Jump to Jazelle code. */
9055 if (inst
.operands
[0].reg
== REG_PC
)
9056 as_tsktsk (_("use of r15 in bxj is not really useful"));
9058 inst
.instruction
|= inst
.operands
[0].reg
;
9061 /* Co-processor data operation:
9062 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9063 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9067 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9068 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
9069 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9070 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9071 inst
.instruction
|= inst
.operands
[4].reg
;
9072 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9078 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9079 encode_arm_shifter_operand (1);
9082 /* Transfer between coprocessor and ARM registers.
9083 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9088 No special properties. */
9090 struct deprecated_coproc_regs_s
9097 arm_feature_set deprecated
;
9098 arm_feature_set obsoleted
;
9099 const char *dep_msg
;
9100 const char *obs_msg
;
9103 #define DEPR_ACCESS_V8 \
9104 N_("This coprocessor register access is deprecated in ARMv8")
9106 /* Table of all deprecated coprocessor registers. */
9107 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
9109 {15, 0, 7, 10, 5, /* CP15DMB. */
9110 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9111 DEPR_ACCESS_V8
, NULL
},
9112 {15, 0, 7, 10, 4, /* CP15DSB. */
9113 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9114 DEPR_ACCESS_V8
, NULL
},
9115 {15, 0, 7, 5, 4, /* CP15ISB. */
9116 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9117 DEPR_ACCESS_V8
, NULL
},
9118 {14, 6, 1, 0, 0, /* TEEHBR. */
9119 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9120 DEPR_ACCESS_V8
, NULL
},
9121 {14, 6, 0, 0, 0, /* TEECR. */
9122 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9123 DEPR_ACCESS_V8
, NULL
},
9126 #undef DEPR_ACCESS_V8
9128 static const size_t deprecated_coproc_reg_count
=
9129 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
9137 Rd
= inst
.operands
[2].reg
;
9140 if (inst
.instruction
== 0xee000010
9141 || inst
.instruction
== 0xfe000010)
9143 reject_bad_reg (Rd
);
9144 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9146 constraint (Rd
== REG_SP
, BAD_SP
);
9151 if (inst
.instruction
== 0xe000010)
9152 constraint (Rd
== REG_PC
, BAD_PC
);
9155 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
9157 const struct deprecated_coproc_regs_s
*r
=
9158 deprecated_coproc_regs
+ i
;
9160 if (inst
.operands
[0].reg
== r
->cp
9161 && inst
.operands
[1].imm
== r
->opc1
9162 && inst
.operands
[3].reg
== r
->crn
9163 && inst
.operands
[4].reg
== r
->crm
9164 && inst
.operands
[5].imm
== r
->opc2
)
9166 if (! ARM_CPU_IS_ANY (cpu_variant
)
9167 && warn_on_deprecated
9168 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
9169 as_tsktsk ("%s", r
->dep_msg
);
9173 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9174 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
9175 inst
.instruction
|= Rd
<< 12;
9176 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9177 inst
.instruction
|= inst
.operands
[4].reg
;
9178 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9181 /* Transfer between coprocessor register and pair of ARM registers.
9182 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9187 Two XScale instructions are special cases of these:
9189 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9190 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9192 Result unpredictable if Rd or Rn is R15. */
9199 Rd
= inst
.operands
[2].reg
;
9200 Rn
= inst
.operands
[3].reg
;
9204 reject_bad_reg (Rd
);
9205 reject_bad_reg (Rn
);
9209 constraint (Rd
== REG_PC
, BAD_PC
);
9210 constraint (Rn
== REG_PC
, BAD_PC
);
9213 /* Only check the MRRC{2} variants. */
9214 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9216 /* If Rd == Rn, error that the operation is
9217 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9218 constraint (Rd
== Rn
, BAD_OVERLAP
);
9221 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9222 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9223 inst
.instruction
|= Rd
<< 12;
9224 inst
.instruction
|= Rn
<< 16;
9225 inst
.instruction
|= inst
.operands
[4].reg
;
9231 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9232 if (inst
.operands
[1].present
)
9234 inst
.instruction
|= CPSI_MMOD
;
9235 inst
.instruction
|= inst
.operands
[1].imm
;
9242 inst
.instruction
|= inst
.operands
[0].imm
;
9248 unsigned Rd
, Rn
, Rm
;
9250 Rd
= inst
.operands
[0].reg
;
9251 Rn
= (inst
.operands
[1].present
9252 ? inst
.operands
[1].reg
: Rd
);
9253 Rm
= inst
.operands
[2].reg
;
9255 constraint ((Rd
== REG_PC
), BAD_PC
);
9256 constraint ((Rn
== REG_PC
), BAD_PC
);
9257 constraint ((Rm
== REG_PC
), BAD_PC
);
9259 inst
.instruction
|= Rd
<< 16;
9260 inst
.instruction
|= Rn
<< 0;
9261 inst
.instruction
|= Rm
<< 8;
9267 /* There is no IT instruction in ARM mode. We
9268 process it to do the validation as if in
9269 thumb mode, just in case the code gets
9270 assembled for thumb using the unified syntax. */
9275 set_pred_insn_type (IT_INSN
);
9276 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
9277 now_pred
.cc
= inst
.operands
[0].imm
;
9281 /* If there is only one register in the register list,
9282 then return its register number. Otherwise return -1. */
9284 only_one_reg_in_list (int range
)
9286 int i
= ffs (range
) - 1;
9287 return (i
> 15 || range
!= (1 << i
)) ? -1 : i
;
9291 encode_ldmstm(int from_push_pop_mnem
)
9293 int base_reg
= inst
.operands
[0].reg
;
9294 int range
= inst
.operands
[1].imm
;
9297 inst
.instruction
|= base_reg
<< 16;
9298 inst
.instruction
|= range
;
9300 if (inst
.operands
[1].writeback
)
9301 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9303 if (inst
.operands
[0].writeback
)
9305 inst
.instruction
|= WRITE_BACK
;
9306 /* Check for unpredictable uses of writeback. */
9307 if (inst
.instruction
& LOAD_BIT
)
9309 /* Not allowed in LDM type 2. */
9310 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9311 && ((range
& (1 << REG_PC
)) == 0))
9312 as_warn (_("writeback of base register is UNPREDICTABLE"));
9313 /* Only allowed if base reg not in list for other types. */
9314 else if (range
& (1 << base_reg
))
9315 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9319 /* Not allowed for type 2. */
9320 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9321 as_warn (_("writeback of base register is UNPREDICTABLE"));
9322 /* Only allowed if base reg not in list, or first in list. */
9323 else if ((range
& (1 << base_reg
))
9324 && (range
& ((1 << base_reg
) - 1)))
9325 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9329 /* If PUSH/POP has only one register, then use the A2 encoding. */
9330 one_reg
= only_one_reg_in_list (range
);
9331 if (from_push_pop_mnem
&& one_reg
>= 0)
9333 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9335 if (is_push
&& one_reg
== 13 /* SP */)
9336 /* PR 22483: The A2 encoding cannot be used when
9337 pushing the stack pointer as this is UNPREDICTABLE. */
9340 inst
.instruction
&= A_COND_MASK
;
9341 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9342 inst
.instruction
|= one_reg
<< 12;
9349 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9352 /* ARMv5TE load-consecutive (argument parse)
9361 constraint (inst
.operands
[0].reg
% 2 != 0,
9362 _("first transfer register must be even"));
9363 constraint (inst
.operands
[1].present
9364 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9365 _("can only transfer two consecutive registers"));
9366 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9367 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9369 if (!inst
.operands
[1].present
)
9370 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9372 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9373 register and the first register written; we have to diagnose
9374 overlap between the base and the second register written here. */
9376 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9377 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9378 as_warn (_("base register written back, and overlaps "
9379 "second transfer register"));
9381 if (!(inst
.instruction
& V4_STR_BIT
))
9383 /* For an index-register load, the index register must not overlap the
9384 destination (even if not write-back). */
9385 if (inst
.operands
[2].immisreg
9386 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9387 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9388 as_warn (_("index register overlaps transfer register"));
9390 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9391 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9397 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9398 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9399 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9400 || inst
.operands
[1].negative
9401 /* This can arise if the programmer has written
9403 or if they have mistakenly used a register name as the last
9406 It is very difficult to distinguish between these two cases
9407 because "rX" might actually be a label. ie the register
9408 name has been occluded by a symbol of the same name. So we
9409 just generate a general 'bad addressing mode' type error
9410 message and leave it up to the programmer to discover the
9411 true cause and fix their mistake. */
9412 || (inst
.operands
[1].reg
== REG_PC
),
9415 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9416 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9417 _("offset must be zero in ARM encoding"));
9419 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9421 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9422 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9423 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9429 constraint (inst
.operands
[0].reg
% 2 != 0,
9430 _("even register required"));
9431 constraint (inst
.operands
[1].present
9432 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9433 _("can only load two consecutive registers"));
9434 /* If op 1 were present and equal to PC, this function wouldn't
9435 have been called in the first place. */
9436 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9438 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9439 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9442 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9443 which is not a multiple of four is UNPREDICTABLE. */
9445 check_ldr_r15_aligned (void)
9447 constraint (!(inst
.operands
[1].immisreg
)
9448 && (inst
.operands
[0].reg
== REG_PC
9449 && inst
.operands
[1].reg
== REG_PC
9450 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9451 _("ldr to register 15 must be 4-byte aligned"));
9457 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9458 if (!inst
.operands
[1].isreg
)
9459 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9461 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9462 check_ldr_r15_aligned ();
9468 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9470 if (inst
.operands
[1].preind
)
9472 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9473 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9474 _("this instruction requires a post-indexed address"));
9476 inst
.operands
[1].preind
= 0;
9477 inst
.operands
[1].postind
= 1;
9478 inst
.operands
[1].writeback
= 1;
9480 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9481 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9484 /* Halfword and signed-byte load/store operations. */
9489 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9490 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9491 if (!inst
.operands
[1].isreg
)
9492 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9494 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9500 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9502 if (inst
.operands
[1].preind
)
9504 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9505 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9506 _("this instruction requires a post-indexed address"));
9508 inst
.operands
[1].preind
= 0;
9509 inst
.operands
[1].postind
= 1;
9510 inst
.operands
[1].writeback
= 1;
9512 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9513 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9516 /* Co-processor register load/store.
9517 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9521 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9522 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9523 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9529 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9530 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9531 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9532 && !(inst
.instruction
& 0x00400000))
9533 as_tsktsk (_("Rd and Rm should be different in mla"));
9535 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9536 inst
.instruction
|= inst
.operands
[1].reg
;
9537 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9538 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9544 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9545 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9547 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9548 encode_arm_shifter_operand (1);
9551 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9558 top
= (inst
.instruction
& 0x00400000) != 0;
9559 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9560 _(":lower16: not allowed in this instruction"));
9561 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9562 _(":upper16: not allowed in this instruction"));
9563 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9564 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9566 imm
= inst
.relocs
[0].exp
.X_add_number
;
9567 /* The value is in two pieces: 0:11, 16:19. */
9568 inst
.instruction
|= (imm
& 0x00000fff);
9569 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9574 do_vfp_nsyn_mrs (void)
9576 if (inst
.operands
[0].isvec
)
9578 if (inst
.operands
[1].reg
!= 1)
9579 first_error (_("operand 1 must be FPSCR"));
9580 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9581 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9582 do_vfp_nsyn_opcode ("fmstat");
9584 else if (inst
.operands
[1].isvec
)
9585 do_vfp_nsyn_opcode ("fmrx");
9593 do_vfp_nsyn_msr (void)
9595 if (inst
.operands
[0].isvec
)
9596 do_vfp_nsyn_opcode ("fmxr");
9606 unsigned Rt
= inst
.operands
[0].reg
;
9608 if (thumb_mode
&& Rt
== REG_SP
)
9610 inst
.error
= BAD_SP
;
9614 /* MVFR2 is only valid at ARMv8-A. */
9615 if (inst
.operands
[1].reg
== 5)
9616 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9619 /* APSR_ sets isvec. All other refs to PC are illegal. */
9620 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9622 inst
.error
= BAD_PC
;
9626 /* If we get through parsing the register name, we just insert the number
9627 generated into the instruction without further validation. */
9628 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9629 inst
.instruction
|= (Rt
<< 12);
9635 unsigned Rt
= inst
.operands
[1].reg
;
9638 reject_bad_reg (Rt
);
9639 else if (Rt
== REG_PC
)
9641 inst
.error
= BAD_PC
;
9645 /* MVFR2 is only valid for ARMv8-A. */
9646 if (inst
.operands
[0].reg
== 5)
9647 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9650 /* If we get through parsing the register name, we just insert the number
9651 generated into the instruction without further validation. */
9652 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9653 inst
.instruction
|= (Rt
<< 12);
9661 if (do_vfp_nsyn_mrs () == SUCCESS
)
9664 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9665 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9667 if (inst
.operands
[1].isreg
)
9669 br
= inst
.operands
[1].reg
;
9670 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9671 as_bad (_("bad register for mrs"));
9675 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9676 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9678 _("'APSR', 'CPSR' or 'SPSR' expected"));
9679 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9682 inst
.instruction
|= br
;
9685 /* Two possible forms:
9686 "{C|S}PSR_<field>, Rm",
9687 "{C|S}PSR_f, #expression". */
9692 if (do_vfp_nsyn_msr () == SUCCESS
)
9695 inst
.instruction
|= inst
.operands
[0].imm
;
9696 if (inst
.operands
[1].isreg
)
9697 inst
.instruction
|= inst
.operands
[1].reg
;
9700 inst
.instruction
|= INST_IMMEDIATE
;
9701 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9702 inst
.relocs
[0].pc_rel
= 0;
9709 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9711 if (!inst
.operands
[2].present
)
9712 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9713 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9714 inst
.instruction
|= inst
.operands
[1].reg
;
9715 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9717 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9718 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9719 as_tsktsk (_("Rd and Rm should be different in mul"));
9722 /* Long Multiply Parser
9723 UMULL RdLo, RdHi, Rm, Rs
9724 SMULL RdLo, RdHi, Rm, Rs
9725 UMLAL RdLo, RdHi, Rm, Rs
9726 SMLAL RdLo, RdHi, Rm, Rs. */
9731 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9732 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9733 inst
.instruction
|= inst
.operands
[2].reg
;
9734 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9736 /* rdhi and rdlo must be different. */
9737 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9738 as_tsktsk (_("rdhi and rdlo must be different"));
9740 /* rdhi, rdlo and rm must all be different before armv6. */
9741 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9742 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9743 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9744 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9750 if (inst
.operands
[0].present
9751 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9753 /* Architectural NOP hints are CPSR sets with no bits selected. */
9754 inst
.instruction
&= 0xf0000000;
9755 inst
.instruction
|= 0x0320f000;
9756 if (inst
.operands
[0].present
)
9757 inst
.instruction
|= inst
.operands
[0].imm
;
9761 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9762 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9763 Condition defaults to COND_ALWAYS.
9764 Error if Rd, Rn or Rm are R15. */
9769 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9770 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9771 inst
.instruction
|= inst
.operands
[2].reg
;
9772 if (inst
.operands
[3].present
)
9773 encode_arm_shift (3);
9776 /* ARM V6 PKHTB (Argument Parse). */
9781 if (!inst
.operands
[3].present
)
9783 /* If the shift specifier is omitted, turn the instruction
9784 into pkhbt rd, rm, rn. */
9785 inst
.instruction
&= 0xfff00010;
9786 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9787 inst
.instruction
|= inst
.operands
[1].reg
;
9788 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9792 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9793 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9794 inst
.instruction
|= inst
.operands
[2].reg
;
9795 encode_arm_shift (3);
9799 /* ARMv5TE: Preload-Cache
9800 MP Extensions: Preload for write
9804 Syntactically, like LDR with B=1, W=0, L=1. */
9809 constraint (!inst
.operands
[0].isreg
,
9810 _("'[' expected after PLD mnemonic"));
9811 constraint (inst
.operands
[0].postind
,
9812 _("post-indexed expression used in preload instruction"));
9813 constraint (inst
.operands
[0].writeback
,
9814 _("writeback used in preload instruction"));
9815 constraint (!inst
.operands
[0].preind
,
9816 _("unindexed addressing used in preload instruction"));
9817 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9820 /* ARMv7: PLI <addr_mode> */
9824 constraint (!inst
.operands
[0].isreg
,
9825 _("'[' expected after PLI mnemonic"));
9826 constraint (inst
.operands
[0].postind
,
9827 _("post-indexed expression used in preload instruction"));
9828 constraint (inst
.operands
[0].writeback
,
9829 _("writeback used in preload instruction"));
9830 constraint (!inst
.operands
[0].preind
,
9831 _("unindexed addressing used in preload instruction"));
9832 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9833 inst
.instruction
&= ~PRE_INDEX
;
9839 constraint (inst
.operands
[0].writeback
,
9840 _("push/pop do not support {reglist}^"));
9841 inst
.operands
[1] = inst
.operands
[0];
9842 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9843 inst
.operands
[0].isreg
= 1;
9844 inst
.operands
[0].writeback
= 1;
9845 inst
.operands
[0].reg
= REG_SP
;
9846 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9849 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9850 word at the specified address and the following word
9852 Unconditionally executed.
9853 Error if Rn is R15. */
9858 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9859 if (inst
.operands
[0].writeback
)
9860 inst
.instruction
|= WRITE_BACK
;
9863 /* ARM V6 ssat (argument parse). */
9868 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9869 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9870 inst
.instruction
|= inst
.operands
[2].reg
;
9872 if (inst
.operands
[3].present
)
9873 encode_arm_shift (3);
9876 /* ARM V6 usat (argument parse). */
9881 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9882 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9883 inst
.instruction
|= inst
.operands
[2].reg
;
9885 if (inst
.operands
[3].present
)
9886 encode_arm_shift (3);
9889 /* ARM V6 ssat16 (argument parse). */
9894 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9895 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9896 inst
.instruction
|= inst
.operands
[2].reg
;
9902 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9903 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9904 inst
.instruction
|= inst
.operands
[2].reg
;
9907 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9908 preserving the other bits.
9910 setend <endian_specifier>, where <endian_specifier> is either
9916 if (warn_on_deprecated
9917 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9918 as_tsktsk (_("setend use is deprecated for ARMv8"));
9920 if (inst
.operands
[0].imm
)
9921 inst
.instruction
|= 0x200;
9927 unsigned int Rm
= (inst
.operands
[1].present
9928 ? inst
.operands
[1].reg
9929 : inst
.operands
[0].reg
);
9931 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9932 inst
.instruction
|= Rm
;
9933 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9935 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9936 inst
.instruction
|= SHIFT_BY_REG
;
9937 /* PR 12854: Error on extraneous shifts. */
9938 constraint (inst
.operands
[2].shifted
,
9939 _("extraneous shift as part of operand to shift insn"));
9942 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9948 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9949 inst
.relocs
[0].pc_rel
= 0;
9955 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9956 inst
.relocs
[0].pc_rel
= 0;
9962 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9963 inst
.relocs
[0].pc_rel
= 0;
9969 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9970 _("selected processor does not support SETPAN instruction"));
9972 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9978 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9979 _("selected processor does not support SETPAN instruction"));
9981 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9984 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9985 SMLAxy{cond} Rd,Rm,Rs,Rn
9986 SMLAWy{cond} Rd,Rm,Rs,Rn
9987 Error if any register is R15. */
9992 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9993 inst
.instruction
|= inst
.operands
[1].reg
;
9994 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9995 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9998 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9999 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10000 Error if any register is R15.
10001 Warning if Rdlo == Rdhi. */
10006 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10007 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10008 inst
.instruction
|= inst
.operands
[2].reg
;
10009 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
10011 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
10012 as_tsktsk (_("rdhi and rdlo must be different"));
10015 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10016 SMULxy{cond} Rd,Rm,Rs
10017 Error if any register is R15. */
10022 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10023 inst
.instruction
|= inst
.operands
[1].reg
;
10024 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10027 /* ARM V6 srs (argument parse). The variable fields in the encoding are
10028 the same for both ARM and Thumb-2. */
10035 if (inst
.operands
[0].present
)
10037 reg
= inst
.operands
[0].reg
;
10038 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
10043 inst
.instruction
|= reg
<< 16;
10044 inst
.instruction
|= inst
.operands
[1].imm
;
10045 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
10046 inst
.instruction
|= WRITE_BACK
;
10049 /* ARM V6 strex (argument parse). */
10054 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10055 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10056 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10057 || inst
.operands
[2].negative
10058 /* See comment in do_ldrex(). */
10059 || (inst
.operands
[2].reg
== REG_PC
),
10062 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10063 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10065 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10066 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10067 _("offset must be zero in ARM encoding"));
10069 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10070 inst
.instruction
|= inst
.operands
[1].reg
;
10071 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10072 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10076 do_t_strexbh (void)
10078 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10079 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10080 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10081 || inst
.operands
[2].negative
,
10084 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10085 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10093 constraint (inst
.operands
[1].reg
% 2 != 0,
10094 _("even register required"));
10095 constraint (inst
.operands
[2].present
10096 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
10097 _("can only store two consecutive registers"));
10098 /* If op 2 were present and equal to PC, this function wouldn't
10099 have been called in the first place. */
10100 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
10102 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10103 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
10104 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
10107 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10108 inst
.instruction
|= inst
.operands
[1].reg
;
10109 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10116 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10117 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10125 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10126 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10131 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10132 extends it to 32-bits, and adds the result to a value in another
10133 register. You can specify a rotation by 0, 8, 16, or 24 bits
10134 before extracting the 16-bit value.
10135 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10136 Condition defaults to COND_ALWAYS.
10137 Error if any register uses R15. */
10142 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10143 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10144 inst
.instruction
|= inst
.operands
[2].reg
;
10145 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
10150 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10151 Condition defaults to COND_ALWAYS.
10152 Error if any register uses R15. */
10157 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10158 inst
.instruction
|= inst
.operands
[1].reg
;
10159 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
10162 /* VFP instructions. In a logical order: SP variant first, monad
10163 before dyad, arithmetic then move then load/store. */
10166 do_vfp_sp_monadic (void)
10168 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10169 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10173 do_vfp_sp_dyadic (void)
10175 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10176 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10177 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10181 do_vfp_sp_compare_z (void)
10183 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10187 do_vfp_dp_sp_cvt (void)
10189 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10190 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10194 do_vfp_sp_dp_cvt (void)
10196 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10197 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10201 do_vfp_reg_from_sp (void)
10203 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10204 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10208 do_vfp_reg2_from_sp2 (void)
10210 constraint (inst
.operands
[2].imm
!= 2,
10211 _("only two consecutive VFP SP registers allowed here"));
10212 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10213 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10214 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10218 do_vfp_sp_from_reg (void)
10220 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10221 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10225 do_vfp_sp2_from_reg2 (void)
10227 constraint (inst
.operands
[0].imm
!= 2,
10228 _("only two consecutive VFP SP registers allowed here"));
10229 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10230 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10231 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10235 do_vfp_sp_ldst (void)
10237 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10238 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10242 do_vfp_dp_ldst (void)
10244 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10245 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10250 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10252 if (inst
.operands
[0].writeback
)
10253 inst
.instruction
|= WRITE_BACK
;
10255 constraint (ldstm_type
!= VFP_LDSTMIA
,
10256 _("this addressing mode requires base-register writeback"));
10257 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10258 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10259 inst
.instruction
|= inst
.operands
[1].imm
;
10263 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10267 if (inst
.operands
[0].writeback
)
10268 inst
.instruction
|= WRITE_BACK
;
10270 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10271 _("this addressing mode requires base-register writeback"));
10273 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10274 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10276 count
= inst
.operands
[1].imm
<< 1;
10277 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10280 inst
.instruction
|= count
;
10284 do_vfp_sp_ldstmia (void)
10286 vfp_sp_ldstm (VFP_LDSTMIA
);
10290 do_vfp_sp_ldstmdb (void)
10292 vfp_sp_ldstm (VFP_LDSTMDB
);
10296 do_vfp_dp_ldstmia (void)
10298 vfp_dp_ldstm (VFP_LDSTMIA
);
10302 do_vfp_dp_ldstmdb (void)
10304 vfp_dp_ldstm (VFP_LDSTMDB
);
10308 do_vfp_xp_ldstmia (void)
10310 vfp_dp_ldstm (VFP_LDSTMIAX
);
10314 do_vfp_xp_ldstmdb (void)
10316 vfp_dp_ldstm (VFP_LDSTMDBX
);
10320 do_vfp_dp_rd_rm (void)
10322 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10323 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10327 do_vfp_dp_rn_rd (void)
10329 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10330 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10334 do_vfp_dp_rd_rn (void)
10336 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10337 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10341 do_vfp_dp_rd_rn_rm (void)
10343 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10344 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10345 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10349 do_vfp_dp_rd (void)
10351 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10355 do_vfp_dp_rm_rd_rn (void)
10357 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10358 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10359 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10362 /* VFPv3 instructions. */
10364 do_vfp_sp_const (void)
10366 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10367 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10368 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10372 do_vfp_dp_const (void)
10374 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10375 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10376 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10380 vfp_conv (int srcsize
)
10382 int immbits
= srcsize
- inst
.operands
[1].imm
;
10384 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10386 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10387 i.e. immbits must be in range 0 - 16. */
10388 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10391 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10393 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10394 i.e. immbits must be in range 0 - 31. */
10395 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10399 inst
.instruction
|= (immbits
& 1) << 5;
10400 inst
.instruction
|= (immbits
>> 1);
10404 do_vfp_sp_conv_16 (void)
10406 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10411 do_vfp_dp_conv_16 (void)
10413 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10418 do_vfp_sp_conv_32 (void)
10420 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10425 do_vfp_dp_conv_32 (void)
10427 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10431 /* FPA instructions. Also in a logical order. */
10436 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10437 inst
.instruction
|= inst
.operands
[1].reg
;
10441 do_fpa_ldmstm (void)
10443 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10444 switch (inst
.operands
[1].imm
)
10446 case 1: inst
.instruction
|= CP_T_X
; break;
10447 case 2: inst
.instruction
|= CP_T_Y
; break;
10448 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10453 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10455 /* The instruction specified "ea" or "fd", so we can only accept
10456 [Rn]{!}. The instruction does not really support stacking or
10457 unstacking, so we have to emulate these by setting appropriate
10458 bits and offsets. */
10459 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10460 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10461 _("this instruction does not support indexing"));
10463 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10464 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10466 if (!(inst
.instruction
& INDEX_UP
))
10467 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10469 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10471 inst
.operands
[2].preind
= 0;
10472 inst
.operands
[2].postind
= 1;
10476 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10479 /* iWMMXt instructions: strictly in alphabetical order. */
10482 do_iwmmxt_tandorc (void)
10484 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10488 do_iwmmxt_textrc (void)
10490 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10491 inst
.instruction
|= inst
.operands
[1].imm
;
10495 do_iwmmxt_textrm (void)
10497 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10498 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10499 inst
.instruction
|= inst
.operands
[2].imm
;
10503 do_iwmmxt_tinsr (void)
10505 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10506 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10507 inst
.instruction
|= inst
.operands
[2].imm
;
10511 do_iwmmxt_tmia (void)
10513 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10514 inst
.instruction
|= inst
.operands
[1].reg
;
10515 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10519 do_iwmmxt_waligni (void)
10521 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10522 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10523 inst
.instruction
|= inst
.operands
[2].reg
;
10524 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10528 do_iwmmxt_wmerge (void)
10530 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10531 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10532 inst
.instruction
|= inst
.operands
[2].reg
;
10533 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10537 do_iwmmxt_wmov (void)
10539 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10540 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10541 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10542 inst
.instruction
|= inst
.operands
[1].reg
;
10546 do_iwmmxt_wldstbh (void)
10549 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10551 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10553 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10554 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10558 do_iwmmxt_wldstw (void)
10560 /* RIWR_RIWC clears .isreg for a control register. */
10561 if (!inst
.operands
[0].isreg
)
10563 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10564 inst
.instruction
|= 0xf0000000;
10567 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10568 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10572 do_iwmmxt_wldstd (void)
10574 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10575 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10576 && inst
.operands
[1].immisreg
)
10578 inst
.instruction
&= ~0x1a000ff;
10579 inst
.instruction
|= (0xfU
<< 28);
10580 if (inst
.operands
[1].preind
)
10581 inst
.instruction
|= PRE_INDEX
;
10582 if (!inst
.operands
[1].negative
)
10583 inst
.instruction
|= INDEX_UP
;
10584 if (inst
.operands
[1].writeback
)
10585 inst
.instruction
|= WRITE_BACK
;
10586 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10587 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10588 inst
.instruction
|= inst
.operands
[1].imm
;
10591 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10595 do_iwmmxt_wshufh (void)
10597 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10598 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10599 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10600 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10604 do_iwmmxt_wzero (void)
10606 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10607 inst
.instruction
|= inst
.operands
[0].reg
;
10608 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10609 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10613 do_iwmmxt_wrwrwr_or_imm5 (void)
10615 if (inst
.operands
[2].isreg
)
10618 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10619 _("immediate operand requires iWMMXt2"));
10621 if (inst
.operands
[2].imm
== 0)
10623 switch ((inst
.instruction
>> 20) & 0xf)
10629 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10630 inst
.operands
[2].imm
= 16;
10631 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10637 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10638 inst
.operands
[2].imm
= 32;
10639 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10646 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10648 wrn
= (inst
.instruction
>> 16) & 0xf;
10649 inst
.instruction
&= 0xff0fff0f;
10650 inst
.instruction
|= wrn
;
10651 /* Bail out here; the instruction is now assembled. */
10656 /* Map 32 -> 0, etc. */
10657 inst
.operands
[2].imm
&= 0x1f;
10658 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10662 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10663 operations first, then control, shift, and load/store. */
10665 /* Insns like "foo X,Y,Z". */
10668 do_mav_triple (void)
10670 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10671 inst
.instruction
|= inst
.operands
[1].reg
;
10672 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10675 /* Insns like "foo W,X,Y,Z".
10676 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10681 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10682 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10683 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10684 inst
.instruction
|= inst
.operands
[3].reg
;
10687 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10689 do_mav_dspsc (void)
10691 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10694 /* Maverick shift immediate instructions.
10695 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10696 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10699 do_mav_shift (void)
10701 int imm
= inst
.operands
[2].imm
;
10703 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10704 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10706 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10707 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10708 Bit 4 should be 0. */
10709 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10711 inst
.instruction
|= imm
;
10714 /* XScale instructions. Also sorted arithmetic before move. */
10716 /* Xscale multiply-accumulate (argument parse)
10719 MIAxycc acc0,Rm,Rs. */
10724 inst
.instruction
|= inst
.operands
[1].reg
;
10725 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10728 /* Xscale move-accumulator-register (argument parse)
10730 MARcc acc0,RdLo,RdHi. */
10735 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10736 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10739 /* Xscale move-register-accumulator (argument parse)
10741 MRAcc RdLo,RdHi,acc0. */
10746 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10747 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10748 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10751 /* Encoding functions relevant only to Thumb. */
10753 /* inst.operands[i] is a shifted-register operand; encode
10754 it into inst.instruction in the format used by Thumb32. */
10757 encode_thumb32_shifted_operand (int i
)
10759 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10760 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10762 constraint (inst
.operands
[i
].immisreg
,
10763 _("shift by register not allowed in thumb mode"));
10764 inst
.instruction
|= inst
.operands
[i
].reg
;
10765 if (shift
== SHIFT_RRX
)
10766 inst
.instruction
|= SHIFT_ROR
<< 4;
10769 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10770 _("expression too complex"));
10772 constraint (value
> 32
10773 || (value
== 32 && (shift
== SHIFT_LSL
10774 || shift
== SHIFT_ROR
)),
10775 _("shift expression is too large"));
10779 else if (value
== 32)
10782 inst
.instruction
|= shift
<< 4;
10783 inst
.instruction
|= (value
& 0x1c) << 10;
10784 inst
.instruction
|= (value
& 0x03) << 6;
10789 /* inst.operands[i] was set up by parse_address. Encode it into a
10790 Thumb32 format load or store instruction. Reject forms that cannot
10791 be used with such instructions. If is_t is true, reject forms that
10792 cannot be used with a T instruction; if is_d is true, reject forms
10793 that cannot be used with a D instruction. If it is a store insn,
10794 reject PC in Rn. */
10797 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10799 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10801 constraint (!inst
.operands
[i
].isreg
,
10802 _("Instruction does not support =N addresses"));
10804 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10805 if (inst
.operands
[i
].immisreg
)
10807 constraint (is_pc
, BAD_PC_ADDRESSING
);
10808 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10809 constraint (inst
.operands
[i
].negative
,
10810 _("Thumb does not support negative register indexing"));
10811 constraint (inst
.operands
[i
].postind
,
10812 _("Thumb does not support register post-indexing"));
10813 constraint (inst
.operands
[i
].writeback
,
10814 _("Thumb does not support register indexing with writeback"));
10815 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10816 _("Thumb supports only LSL in shifted register indexing"));
10818 inst
.instruction
|= inst
.operands
[i
].imm
;
10819 if (inst
.operands
[i
].shifted
)
10821 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10822 _("expression too complex"));
10823 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10824 || inst
.relocs
[0].exp
.X_add_number
> 3,
10825 _("shift out of range"));
10826 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10828 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10830 else if (inst
.operands
[i
].preind
)
10832 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10833 constraint (is_t
&& inst
.operands
[i
].writeback
,
10834 _("cannot use writeback with this instruction"));
10835 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10836 BAD_PC_ADDRESSING
);
10840 inst
.instruction
|= 0x01000000;
10841 if (inst
.operands
[i
].writeback
)
10842 inst
.instruction
|= 0x00200000;
10846 inst
.instruction
|= 0x00000c00;
10847 if (inst
.operands
[i
].writeback
)
10848 inst
.instruction
|= 0x00000100;
10850 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10852 else if (inst
.operands
[i
].postind
)
10854 gas_assert (inst
.operands
[i
].writeback
);
10855 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10856 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10859 inst
.instruction
|= 0x00200000;
10861 inst
.instruction
|= 0x00000900;
10862 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10864 else /* unindexed - only for coprocessor */
10865 inst
.error
= _("instruction does not accept unindexed addressing");
10868 /* Table of Thumb instructions which exist in both 16- and 32-bit
10869 encodings (the latter only in post-V6T2 cores). The index is the
10870 value used in the insns table below. When there is more than one
10871 possible 16-bit encoding for the instruction, this table always
10873 Also contains several pseudo-instructions used during relaxation. */
10874 #define T16_32_TAB \
10875 X(_adc, 4140, eb400000), \
10876 X(_adcs, 4140, eb500000), \
10877 X(_add, 1c00, eb000000), \
10878 X(_adds, 1c00, eb100000), \
10879 X(_addi, 0000, f1000000), \
10880 X(_addis, 0000, f1100000), \
10881 X(_add_pc,000f, f20f0000), \
10882 X(_add_sp,000d, f10d0000), \
10883 X(_adr, 000f, f20f0000), \
10884 X(_and, 4000, ea000000), \
10885 X(_ands, 4000, ea100000), \
10886 X(_asr, 1000, fa40f000), \
10887 X(_asrs, 1000, fa50f000), \
10888 X(_b, e000, f000b000), \
10889 X(_bcond, d000, f0008000), \
10890 X(_bf, 0000, f040e001), \
10891 X(_bfcsel,0000, f000e001), \
10892 X(_bfx, 0000, f060e001), \
10893 X(_bfl, 0000, f000c001), \
10894 X(_bflx, 0000, f070e001), \
10895 X(_bic, 4380, ea200000), \
10896 X(_bics, 4380, ea300000), \
10897 X(_cmn, 42c0, eb100f00), \
10898 X(_cmp, 2800, ebb00f00), \
10899 X(_cpsie, b660, f3af8400), \
10900 X(_cpsid, b670, f3af8600), \
10901 X(_cpy, 4600, ea4f0000), \
10902 X(_dec_sp,80dd, f1ad0d00), \
10903 X(_dls, 0000, f040e001), \
10904 X(_eor, 4040, ea800000), \
10905 X(_eors, 4040, ea900000), \
10906 X(_inc_sp,00dd, f10d0d00), \
10907 X(_ldmia, c800, e8900000), \
10908 X(_ldr, 6800, f8500000), \
10909 X(_ldrb, 7800, f8100000), \
10910 X(_ldrh, 8800, f8300000), \
10911 X(_ldrsb, 5600, f9100000), \
10912 X(_ldrsh, 5e00, f9300000), \
10913 X(_ldr_pc,4800, f85f0000), \
10914 X(_ldr_pc2,4800, f85f0000), \
10915 X(_ldr_sp,9800, f85d0000), \
10916 X(_le, 0000, f00fc001), \
10917 X(_lsl, 0000, fa00f000), \
10918 X(_lsls, 0000, fa10f000), \
10919 X(_lsr, 0800, fa20f000), \
10920 X(_lsrs, 0800, fa30f000), \
10921 X(_mov, 2000, ea4f0000), \
10922 X(_movs, 2000, ea5f0000), \
10923 X(_mul, 4340, fb00f000), \
10924 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10925 X(_mvn, 43c0, ea6f0000), \
10926 X(_mvns, 43c0, ea7f0000), \
10927 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10928 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10929 X(_orr, 4300, ea400000), \
10930 X(_orrs, 4300, ea500000), \
10931 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10932 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10933 X(_rev, ba00, fa90f080), \
10934 X(_rev16, ba40, fa90f090), \
10935 X(_revsh, bac0, fa90f0b0), \
10936 X(_ror, 41c0, fa60f000), \
10937 X(_rors, 41c0, fa70f000), \
10938 X(_sbc, 4180, eb600000), \
10939 X(_sbcs, 4180, eb700000), \
10940 X(_stmia, c000, e8800000), \
10941 X(_str, 6000, f8400000), \
10942 X(_strb, 7000, f8000000), \
10943 X(_strh, 8000, f8200000), \
10944 X(_str_sp,9000, f84d0000), \
10945 X(_sub, 1e00, eba00000), \
10946 X(_subs, 1e00, ebb00000), \
10947 X(_subi, 8000, f1a00000), \
10948 X(_subis, 8000, f1b00000), \
10949 X(_sxtb, b240, fa4ff080), \
10950 X(_sxth, b200, fa0ff080), \
10951 X(_tst, 4200, ea100f00), \
10952 X(_uxtb, b2c0, fa5ff080), \
10953 X(_uxth, b280, fa1ff080), \
10954 X(_nop, bf00, f3af8000), \
10955 X(_yield, bf10, f3af8001), \
10956 X(_wfe, bf20, f3af8002), \
10957 X(_wfi, bf30, f3af8003), \
10958 X(_wls, 0000, f040c001), \
10959 X(_sev, bf40, f3af8004), \
10960 X(_sevl, bf50, f3af8005), \
10961 X(_udf, de00, f7f0a000)
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */

/* First expansion: build the T_MNEM_* enumerators from the table.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode for each mnemonic, indexed by
   (T_MNEM_* - T16_32_OFFSET - 1).  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the 32-bit (Thumb-2) opcode for each mnemonic.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)	     (thumb_op32[(n) - (T16_32_OFFSET + 1)])
/* Bit 20 of the 32-bit encoding is the S (flag-setting) bit.  */
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
10982 /* Thumb instruction encoders, in alphabetical order. */
10984 /* ADDW or SUBW. */
10987 do_t_add_sub_w (void)
10991 Rd
= inst
.operands
[0].reg
;
10992 Rn
= inst
.operands
[1].reg
;
10994 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10995 is the SP-{plus,minus}-immediate form of the instruction. */
10997 constraint (Rd
== REG_PC
, BAD_PC
);
10999 reject_bad_reg (Rd
);
11001 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
11002 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11005 /* Parse an add or subtract instruction. We get here with inst.instruction
11006 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11009 do_t_add_sub (void)
11013 Rd
= inst
.operands
[0].reg
;
11014 Rs
= (inst
.operands
[1].present
11015 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11016 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11019 set_pred_insn_type_last ();
11021 if (unified_syntax
)
11024 bfd_boolean narrow
;
11027 flags
= (inst
.instruction
== T_MNEM_adds
11028 || inst
.instruction
== T_MNEM_subs
);
11030 narrow
= !in_pred_block ();
11032 narrow
= in_pred_block ();
11033 if (!inst
.operands
[2].isreg
)
11037 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11038 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11040 add
= (inst
.instruction
== T_MNEM_add
11041 || inst
.instruction
== T_MNEM_adds
);
11043 if (inst
.size_req
!= 4)
11045 /* Attempt to use a narrow opcode, with relaxation if
11047 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
11048 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
11049 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
11050 opcode
= T_MNEM_add_sp
;
11051 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
11052 opcode
= T_MNEM_add_pc
;
11053 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
11056 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
11058 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
11062 inst
.instruction
= THUMB_OP16(opcode
);
11063 inst
.instruction
|= (Rd
<< 4) | Rs
;
11064 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11065 || (inst
.relocs
[0].type
11066 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
11068 if (inst
.size_req
== 2)
11069 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11071 inst
.relax
= opcode
;
11075 constraint (inst
.size_req
== 2, BAD_HIREG
);
11077 if (inst
.size_req
== 4
11078 || (inst
.size_req
!= 2 && !opcode
))
11080 constraint ((inst
.relocs
[0].type
11081 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
11082 && (inst
.relocs
[0].type
11083 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
11084 THUMB1_RELOC_ONLY
);
11087 constraint (add
, BAD_PC
);
11088 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
11089 _("only SUBS PC, LR, #const allowed"));
11090 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11091 _("expression too complex"));
11092 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
11093 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
11094 _("immediate value out of range"));
11095 inst
.instruction
= T2_SUBS_PC_LR
11096 | inst
.relocs
[0].exp
.X_add_number
;
11097 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
11100 else if (Rs
== REG_PC
)
11102 /* Always use addw/subw. */
11103 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
11104 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11108 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11109 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
11112 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11114 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
11116 inst
.instruction
|= Rd
<< 8;
11117 inst
.instruction
|= Rs
<< 16;
11122 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11123 unsigned int shift
= inst
.operands
[2].shift_kind
;
11125 Rn
= inst
.operands
[2].reg
;
11126 /* See if we can do this with a 16-bit instruction. */
11127 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
11129 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11134 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
11135 || inst
.instruction
== T_MNEM_add
)
11137 : T_OPCODE_SUB_R3
);
11138 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11142 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
11144 /* Thumb-1 cores (except v6-M) require at least one high
11145 register in a narrow non flag setting add. */
11146 if (Rd
> 7 || Rn
> 7
11147 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
11148 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
11155 inst
.instruction
= T_OPCODE_ADD_HI
;
11156 inst
.instruction
|= (Rd
& 8) << 4;
11157 inst
.instruction
|= (Rd
& 7);
11158 inst
.instruction
|= Rn
<< 3;
11164 constraint (Rd
== REG_PC
, BAD_PC
);
11165 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11166 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11167 constraint (Rs
== REG_PC
, BAD_PC
);
11168 reject_bad_reg (Rn
);
11170 /* If we get here, it can't be done in 16 bits. */
11171 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
11172 _("shift must be constant"));
11173 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11174 inst
.instruction
|= Rd
<< 8;
11175 inst
.instruction
|= Rs
<< 16;
11176 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
11177 _("shift value over 3 not allowed in thumb mode"));
11178 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
11179 _("only LSL shift allowed in thumb mode"));
11180 encode_thumb32_shifted_operand (2);
11185 constraint (inst
.instruction
== T_MNEM_adds
11186 || inst
.instruction
== T_MNEM_subs
,
11189 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
11191 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
11192 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
11195 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11196 ? 0x0000 : 0x8000);
11197 inst
.instruction
|= (Rd
<< 4) | Rs
;
11198 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11202 Rn
= inst
.operands
[2].reg
;
11203 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
11205 /* We now have Rd, Rs, and Rn set to registers. */
11206 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11208 /* Can't do this for SUB. */
11209 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
11210 inst
.instruction
= T_OPCODE_ADD_HI
;
11211 inst
.instruction
|= (Rd
& 8) << 4;
11212 inst
.instruction
|= (Rd
& 7);
11214 inst
.instruction
|= Rn
<< 3;
11216 inst
.instruction
|= Rs
<< 3;
11218 constraint (1, _("dest must overlap one source register"));
11222 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11223 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11224 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11234 Rd
= inst
.operands
[0].reg
;
11235 reject_bad_reg (Rd
);
11237 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11239 /* Defer to section relaxation. */
11240 inst
.relax
= inst
.instruction
;
11241 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11242 inst
.instruction
|= Rd
<< 4;
11244 else if (unified_syntax
&& inst
.size_req
!= 2)
11246 /* Generate a 32-bit opcode. */
11247 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11248 inst
.instruction
|= Rd
<< 8;
11249 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11250 inst
.relocs
[0].pc_rel
= 1;
11254 /* Generate a 16-bit opcode. */
11255 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11256 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11257 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11258 inst
.relocs
[0].pc_rel
= 1;
11259 inst
.instruction
|= Rd
<< 4;
11262 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11263 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11264 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11265 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11266 inst
.relocs
[0].exp
.X_add_number
+= 1;
11269 /* Arithmetic instructions for which there is just one 16-bit
11270 instruction encoding, and it allows only two low registers.
11271 For maximal compatibility with ARM syntax, we allow three register
11272 operands even when Thumb-32 instructions are not available, as long
11273 as the first two are identical. For instance, both "sbc r0,r1" and
11274 "sbc r0,r0,r1" are allowed. */
11280 Rd
= inst
.operands
[0].reg
;
11281 Rs
= (inst
.operands
[1].present
11282 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11283 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11284 Rn
= inst
.operands
[2].reg
;
11286 reject_bad_reg (Rd
);
11287 reject_bad_reg (Rs
);
11288 if (inst
.operands
[2].isreg
)
11289 reject_bad_reg (Rn
);
11291 if (unified_syntax
)
11293 if (!inst
.operands
[2].isreg
)
11295 /* For an immediate, we always generate a 32-bit opcode;
11296 section relaxation will shrink it later if possible. */
11297 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11298 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11299 inst
.instruction
|= Rd
<< 8;
11300 inst
.instruction
|= Rs
<< 16;
11301 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11305 bfd_boolean narrow
;
11307 /* See if we can do this with a 16-bit instruction. */
11308 if (THUMB_SETS_FLAGS (inst
.instruction
))
11309 narrow
= !in_pred_block ();
11311 narrow
= in_pred_block ();
11313 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11315 if (inst
.operands
[2].shifted
)
11317 if (inst
.size_req
== 4)
11323 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11324 inst
.instruction
|= Rd
;
11325 inst
.instruction
|= Rn
<< 3;
11329 /* If we get here, it can't be done in 16 bits. */
11330 constraint (inst
.operands
[2].shifted
11331 && inst
.operands
[2].immisreg
,
11332 _("shift must be constant"));
11333 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11334 inst
.instruction
|= Rd
<< 8;
11335 inst
.instruction
|= Rs
<< 16;
11336 encode_thumb32_shifted_operand (2);
11341 /* On its face this is a lie - the instruction does set the
11342 flags. However, the only supported mnemonic in this mode
11343 says it doesn't. */
11344 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11346 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11347 _("unshifted register required"));
11348 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11349 constraint (Rd
!= Rs
,
11350 _("dest and source1 must be the same register"));
11352 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11353 inst
.instruction
|= Rd
;
11354 inst
.instruction
|= Rn
<< 3;
11358 /* Similarly, but for instructions where the arithmetic operation is
11359 commutative, so we can allow either of them to be different from
11360 the destination operand in a 16-bit instruction. For instance, all
11361 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11368 Rd
= inst
.operands
[0].reg
;
11369 Rs
= (inst
.operands
[1].present
11370 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11371 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11372 Rn
= inst
.operands
[2].reg
;
11374 reject_bad_reg (Rd
);
11375 reject_bad_reg (Rs
);
11376 if (inst
.operands
[2].isreg
)
11377 reject_bad_reg (Rn
);
11379 if (unified_syntax
)
11381 if (!inst
.operands
[2].isreg
)
11383 /* For an immediate, we always generate a 32-bit opcode;
11384 section relaxation will shrink it later if possible. */
11385 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11386 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11387 inst
.instruction
|= Rd
<< 8;
11388 inst
.instruction
|= Rs
<< 16;
11389 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11393 bfd_boolean narrow
;
11395 /* See if we can do this with a 16-bit instruction. */
11396 if (THUMB_SETS_FLAGS (inst
.instruction
))
11397 narrow
= !in_pred_block ();
11399 narrow
= in_pred_block ();
11401 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11403 if (inst
.operands
[2].shifted
)
11405 if (inst
.size_req
== 4)
11412 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11413 inst
.instruction
|= Rd
;
11414 inst
.instruction
|= Rn
<< 3;
11419 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11420 inst
.instruction
|= Rd
;
11421 inst
.instruction
|= Rs
<< 3;
11426 /* If we get here, it can't be done in 16 bits. */
11427 constraint (inst
.operands
[2].shifted
11428 && inst
.operands
[2].immisreg
,
11429 _("shift must be constant"));
11430 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11431 inst
.instruction
|= Rd
<< 8;
11432 inst
.instruction
|= Rs
<< 16;
11433 encode_thumb32_shifted_operand (2);
11438 /* On its face this is a lie - the instruction does set the
11439 flags. However, the only supported mnemonic in this mode
11440 says it doesn't. */
11441 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11443 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11444 _("unshifted register required"));
11445 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11447 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11448 inst
.instruction
|= Rd
;
11451 inst
.instruction
|= Rn
<< 3;
11453 inst
.instruction
|= Rs
<< 3;
11455 constraint (1, _("dest must overlap one source register"));
11463 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11464 constraint (msb
> 32, _("bit-field extends past end of register"));
11465 /* The instruction encoding stores the LSB and MSB,
11466 not the LSB and width. */
11467 Rd
= inst
.operands
[0].reg
;
11468 reject_bad_reg (Rd
);
11469 inst
.instruction
|= Rd
<< 8;
11470 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11471 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11472 inst
.instruction
|= msb
- 1;
11481 Rd
= inst
.operands
[0].reg
;
11482 reject_bad_reg (Rd
);
11484 /* #0 in second position is alternative syntax for bfc, which is
11485 the same instruction but with REG_PC in the Rm field. */
11486 if (!inst
.operands
[1].isreg
)
11490 Rn
= inst
.operands
[1].reg
;
11491 reject_bad_reg (Rn
);
11494 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11495 constraint (msb
> 32, _("bit-field extends past end of register"));
11496 /* The instruction encoding stores the LSB and MSB,
11497 not the LSB and width. */
11498 inst
.instruction
|= Rd
<< 8;
11499 inst
.instruction
|= Rn
<< 16;
11500 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11501 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11502 inst
.instruction
|= msb
- 1;
11510 Rd
= inst
.operands
[0].reg
;
11511 Rn
= inst
.operands
[1].reg
;
11513 reject_bad_reg (Rd
);
11514 reject_bad_reg (Rn
);
11516 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11517 _("bit-field extends past end of register"));
11518 inst
.instruction
|= Rd
<< 8;
11519 inst
.instruction
|= Rn
<< 16;
11520 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11521 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11522 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11525 /* ARM V5 Thumb BLX (argument parse)
11526 BLX <target_addr> which is BLX(1)
11527 BLX <Rm> which is BLX(2)
11528 Unfortunately, there are two different opcodes for this mnemonic.
11529 So, the insns[].value is not used, and the code here zaps values
11530 into inst.instruction.
11532 ??? How to take advantage of the additional two bits of displacement
11533 available in Thumb32 mode? Need new relocation? */
11538 set_pred_insn_type_last ();
11540 if (inst
.operands
[0].isreg
)
11542 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11543 /* We have a register, so this is BLX(2). */
11544 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11548 /* No register. This must be BLX(1). */
11549 inst
.instruction
= 0xf000e800;
11550 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11559 bfd_reloc_code_real_type reloc
;
11562 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN
);
11564 if (in_pred_block ())
11566 /* Conditional branches inside IT blocks are encoded as unconditional
11568 cond
= COND_ALWAYS
;
11573 if (cond
!= COND_ALWAYS
)
11574 opcode
= T_MNEM_bcond
;
11576 opcode
= inst
.instruction
;
11579 && (inst
.size_req
== 4
11580 || (inst
.size_req
!= 2
11581 && (inst
.operands
[0].hasreloc
11582 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11584 inst
.instruction
= THUMB_OP32(opcode
);
11585 if (cond
== COND_ALWAYS
)
11586 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11589 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11590 _("selected architecture does not support "
11591 "wide conditional branch instruction"));
11593 gas_assert (cond
!= 0xF);
11594 inst
.instruction
|= cond
<< 22;
11595 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11600 inst
.instruction
= THUMB_OP16(opcode
);
11601 if (cond
== COND_ALWAYS
)
11602 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11605 inst
.instruction
|= cond
<< 8;
11606 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11608 /* Allow section relaxation. */
11609 if (unified_syntax
&& inst
.size_req
!= 2)
11610 inst
.relax
= opcode
;
11612 inst
.relocs
[0].type
= reloc
;
11613 inst
.relocs
[0].pc_rel
= 1;
11616 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11617 between the two is the maximum immediate allowed - which is passed in
11620 do_t_bkpt_hlt1 (int range
)
11622 constraint (inst
.cond
!= COND_ALWAYS
,
11623 _("instruction is always unconditional"));
11624 if (inst
.operands
[0].present
)
11626 constraint (inst
.operands
[0].imm
> range
,
11627 _("immediate value out of range"));
11628 inst
.instruction
|= inst
.operands
[0].imm
;
11631 set_pred_insn_type (NEUTRAL_IT_INSN
);
/* Thumb HLT: immediate limited to 63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
/* Thumb BKPT: immediate limited to 255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11647 do_t_branch23 (void)
11649 set_pred_insn_type_last ();
11650 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11652 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11653 this file. We used to simply ignore the PLT reloc type here --
11654 the branch encoding is now needed to deal with TLSCALL relocs.
11655 So if we see a PLT reloc now, put it back to how it used to be to
11656 keep the preexisting behaviour. */
11657 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11658 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11660 #if defined(OBJ_COFF)
11661 /* If the destination of the branch is a defined symbol which does not have
11662 the THUMB_FUNC attribute, then we must be calling a function which has
11663 the (interfacearm) attribute. We look for the Thumb entry point to that
11664 function and change the branch to refer to that function instead. */
11665 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11666 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11667 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11668 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11669 inst
.relocs
[0].exp
.X_add_symbol
11670 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11677 set_pred_insn_type_last ();
11678 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11679 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11680 should cause the alignment to be checked once it is known. This is
11681 because BX PC only works if the instruction is word aligned. */
11689 set_pred_insn_type_last ();
11690 Rm
= inst
.operands
[0].reg
;
11691 reject_bad_reg (Rm
);
11692 inst
.instruction
|= Rm
<< 16;
11701 Rd
= inst
.operands
[0].reg
;
11702 Rm
= inst
.operands
[1].reg
;
11704 reject_bad_reg (Rd
);
11705 reject_bad_reg (Rm
);
11707 inst
.instruction
|= Rd
<< 8;
11708 inst
.instruction
|= Rm
<< 16;
11709 inst
.instruction
|= Rm
;
11715 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11721 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11722 inst
.instruction
|= inst
.operands
[0].imm
;
11728 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11730 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11731 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11733 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11734 inst
.instruction
= 0xf3af8000;
11735 inst
.instruction
|= imod
<< 9;
11736 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11737 if (inst
.operands
[1].present
)
11738 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11742 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11743 && (inst
.operands
[0].imm
& 4),
11744 _("selected processor does not support 'A' form "
11745 "of this instruction"));
11746 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11747 _("Thumb does not support the 2-argument "
11748 "form of this instruction"));
11749 inst
.instruction
|= inst
.operands
[0].imm
;
11753 /* THUMB CPY instruction (argument parse). */
11758 if (inst
.size_req
== 4)
11760 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11761 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11762 inst
.instruction
|= inst
.operands
[1].reg
;
11766 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11767 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11768 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11775 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11776 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11777 inst
.instruction
|= inst
.operands
[0].reg
;
11778 inst
.relocs
[0].pc_rel
= 1;
11779 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11785 inst
.instruction
|= inst
.operands
[0].imm
;
11791 unsigned Rd
, Rn
, Rm
;
11793 Rd
= inst
.operands
[0].reg
;
11794 Rn
= (inst
.operands
[1].present
11795 ? inst
.operands
[1].reg
: Rd
);
11796 Rm
= inst
.operands
[2].reg
;
11798 reject_bad_reg (Rd
);
11799 reject_bad_reg (Rn
);
11800 reject_bad_reg (Rm
);
11802 inst
.instruction
|= Rd
<< 8;
11803 inst
.instruction
|= Rn
<< 16;
11804 inst
.instruction
|= Rm
;
11810 if (unified_syntax
&& inst
.size_req
== 4)
11811 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11813 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11819 unsigned int cond
= inst
.operands
[0].imm
;
11821 set_pred_insn_type (IT_INSN
);
11822 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
11823 now_pred
.cc
= cond
;
11824 now_pred
.warn_deprecated
= FALSE
;
11825 now_pred
.type
= SCALAR_PRED
;
11827 /* If the condition is a negative condition, invert the mask. */
11828 if ((cond
& 0x1) == 0x0)
11830 unsigned int mask
= inst
.instruction
& 0x000f;
11832 if ((mask
& 0x7) == 0)
11834 /* No conversion needed. */
11835 now_pred
.block_length
= 1;
11837 else if ((mask
& 0x3) == 0)
11840 now_pred
.block_length
= 2;
11842 else if ((mask
& 0x1) == 0)
11845 now_pred
.block_length
= 3;
11850 now_pred
.block_length
= 4;
11853 inst
.instruction
&= 0xfff0;
11854 inst
.instruction
|= mask
;
11857 inst
.instruction
|= cond
<< 4;
11863 /* We are dealing with a vector predicated block. */
11864 set_pred_insn_type (VPT_INSN
);
11866 now_pred
.mask
= ((inst
.instruction
& 0x00400000) >> 19)
11867 | ((inst
.instruction
& 0xe000) >> 13);
11868 now_pred
.warn_deprecated
= FALSE
;
11869 now_pred
.type
= VECTOR_PRED
;
11872 /* Helper function used for both push/pop and ldm/stm. */
11874 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11875 bfd_boolean writeback
)
11877 bfd_boolean load
, store
;
11879 gas_assert (base
!= -1 || !do_io
);
11880 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11881 store
= do_io
&& !load
;
11883 if (mask
& (1 << 13))
11884 inst
.error
= _("SP not allowed in register list");
11886 if (do_io
&& (mask
& (1 << base
)) != 0
11888 inst
.error
= _("having the base register in the register list when "
11889 "using write back is UNPREDICTABLE");
11893 if (mask
& (1 << 15))
11895 if (mask
& (1 << 14))
11896 inst
.error
= _("LR and PC should not both be in register list");
11898 set_pred_insn_type_last ();
11903 if (mask
& (1 << 15))
11904 inst
.error
= _("PC not allowed in register list");
11907 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11909 /* Single register transfers implemented as str/ldr. */
11912 if (inst
.instruction
& (1 << 23))
11913 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11915 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11919 if (inst
.instruction
& (1 << 23))
11920 inst
.instruction
= 0x00800000; /* ia -> [base] */
11922 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11925 inst
.instruction
|= 0xf8400000;
11927 inst
.instruction
|= 0x00100000;
11929 mask
= ffs (mask
) - 1;
11932 else if (writeback
)
11933 inst
.instruction
|= WRITE_BACK
;
11935 inst
.instruction
|= mask
;
11937 inst
.instruction
|= base
<< 16;
11943 /* This really doesn't seem worth it. */
11944 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11945 _("expression too complex"));
11946 constraint (inst
.operands
[1].writeback
,
11947 _("Thumb load/store multiple does not support {reglist}^"));
11949 if (unified_syntax
)
11951 bfd_boolean narrow
;
11955 /* See if we can use a 16-bit instruction. */
11956 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11957 && inst
.size_req
!= 4
11958 && !(inst
.operands
[1].imm
& ~0xff))
11960 mask
= 1 << inst
.operands
[0].reg
;
11962 if (inst
.operands
[0].reg
<= 7)
11964 if (inst
.instruction
== T_MNEM_stmia
11965 ? inst
.operands
[0].writeback
11966 : (inst
.operands
[0].writeback
11967 == !(inst
.operands
[1].imm
& mask
)))
11969 if (inst
.instruction
== T_MNEM_stmia
11970 && (inst
.operands
[1].imm
& mask
)
11971 && (inst
.operands
[1].imm
& (mask
- 1)))
11972 as_warn (_("value stored for r%d is UNKNOWN"),
11973 inst
.operands
[0].reg
);
11975 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11976 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11977 inst
.instruction
|= inst
.operands
[1].imm
;
11980 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11982 /* This means 1 register in reg list one of 3 situations:
11983 1. Instruction is stmia, but without writeback.
11984 2. lmdia without writeback, but with Rn not in
11986 3. ldmia with writeback, but with Rn in reglist.
11987 Case 3 is UNPREDICTABLE behaviour, so we handle
11988 case 1 and 2 which can be converted into a 16-bit
11989 str or ldr. The SP cases are handled below. */
11990 unsigned long opcode
;
11991 /* First, record an error for Case 3. */
11992 if (inst
.operands
[1].imm
& mask
11993 && inst
.operands
[0].writeback
)
11995 _("having the base register in the register list when "
11996 "using write back is UNPREDICTABLE");
11998 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
12000 inst
.instruction
= THUMB_OP16 (opcode
);
12001 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
12002 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
12006 else if (inst
.operands
[0] .reg
== REG_SP
)
12008 if (inst
.operands
[0].writeback
)
12011 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
12012 ? T_MNEM_push
: T_MNEM_pop
);
12013 inst
.instruction
|= inst
.operands
[1].imm
;
12016 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
12019 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
12020 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
12021 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
12029 if (inst
.instruction
< 0xffff)
12030 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12032 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
12033 inst
.operands
[1].imm
,
12034 inst
.operands
[0].writeback
);
12039 constraint (inst
.operands
[0].reg
> 7
12040 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
12041 constraint (inst
.instruction
!= T_MNEM_ldmia
12042 && inst
.instruction
!= T_MNEM_stmia
,
12043 _("Thumb-2 instruction only valid in unified syntax"));
12044 if (inst
.instruction
== T_MNEM_stmia
)
12046 if (!inst
.operands
[0].writeback
)
12047 as_warn (_("this instruction will write back the base register"));
12048 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
12049 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
12050 as_warn (_("value stored for r%d is UNKNOWN"),
12051 inst
.operands
[0].reg
);
12055 if (!inst
.operands
[0].writeback
12056 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12057 as_warn (_("this instruction will write back the base register"));
12058 else if (inst
.operands
[0].writeback
12059 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12060 as_warn (_("this instruction will not write back the base register"));
12063 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12064 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12065 inst
.instruction
|= inst
.operands
[1].imm
;
12072 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
12073 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
12074 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
12075 || inst
.operands
[1].negative
,
12078 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
12080 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12081 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12082 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12088 if (!inst
.operands
[1].present
)
12090 constraint (inst
.operands
[0].reg
== REG_LR
,
12091 _("r14 not allowed as first register "
12092 "when second register is omitted"));
12093 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12095 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12098 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12099 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12100 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12106 unsigned long opcode
;
12109 if (inst
.operands
[0].isreg
12110 && !inst
.operands
[0].preind
12111 && inst
.operands
[0].reg
== REG_PC
)
12112 set_pred_insn_type_last ();
12114 opcode
= inst
.instruction
;
12115 if (unified_syntax
)
12117 if (!inst
.operands
[1].isreg
)
12119 if (opcode
<= 0xffff)
12120 inst
.instruction
= THUMB_OP32 (opcode
);
12121 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12124 if (inst
.operands
[1].isreg
12125 && !inst
.operands
[1].writeback
12126 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
12127 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
12128 && opcode
<= 0xffff
12129 && inst
.size_req
!= 4)
12131 /* Insn may have a 16-bit form. */
12132 Rn
= inst
.operands
[1].reg
;
12133 if (inst
.operands
[1].immisreg
)
12135 inst
.instruction
= THUMB_OP16 (opcode
);
12137 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
12139 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
12140 reject_bad_reg (inst
.operands
[1].imm
);
12142 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
12143 && opcode
!= T_MNEM_ldrsb
)
12144 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
12145 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
12152 if (inst
.relocs
[0].pc_rel
)
12153 opcode
= T_MNEM_ldr_pc2
;
12155 opcode
= T_MNEM_ldr_pc
;
12159 if (opcode
== T_MNEM_ldr
)
12160 opcode
= T_MNEM_ldr_sp
;
12162 opcode
= T_MNEM_str_sp
;
12164 inst
.instruction
= inst
.operands
[0].reg
<< 8;
12168 inst
.instruction
= inst
.operands
[0].reg
;
12169 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12171 inst
.instruction
|= THUMB_OP16 (opcode
);
12172 if (inst
.size_req
== 2)
12173 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12175 inst
.relax
= opcode
;
12179 /* Definitely a 32-bit variant. */
12181 /* Warning for Erratum 752419. */
12182 if (opcode
== T_MNEM_ldr
12183 && inst
.operands
[0].reg
== REG_SP
12184 && inst
.operands
[1].writeback
== 1
12185 && !inst
.operands
[1].immisreg
)
12187 if (no_cpu_selected ()
12188 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
12189 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
12190 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
12191 as_warn (_("This instruction may be unpredictable "
12192 "if executed on M-profile cores "
12193 "with interrupts enabled."));
12196 /* Do some validations regarding addressing modes. */
12197 if (inst
.operands
[1].immisreg
)
12198 reject_bad_reg (inst
.operands
[1].imm
);
12200 constraint (inst
.operands
[1].writeback
== 1
12201 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12204 inst
.instruction
= THUMB_OP32 (opcode
);
12205 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12206 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12207 check_ldr_r15_aligned ();
12211 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12213 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
12215 /* Only [Rn,Rm] is acceptable. */
12216 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
12217 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
12218 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
12219 || inst
.operands
[1].negative
,
12220 _("Thumb does not support this addressing mode"));
12221 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12225 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12226 if (!inst
.operands
[1].isreg
)
12227 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12230 constraint (!inst
.operands
[1].preind
12231 || inst
.operands
[1].shifted
12232 || inst
.operands
[1].writeback
,
12233 _("Thumb does not support this addressing mode"));
12234 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12236 constraint (inst
.instruction
& 0x0600,
12237 _("byte or halfword not valid for base register"));
12238 constraint (inst
.operands
[1].reg
== REG_PC
12239 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12240 _("r15 based store not allowed"));
12241 constraint (inst
.operands
[1].immisreg
,
12242 _("invalid base register for register offset"));
12244 if (inst
.operands
[1].reg
== REG_PC
)
12245 inst
.instruction
= T_OPCODE_LDR_PC
;
12246 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12247 inst
.instruction
= T_OPCODE_LDR_SP
;
12249 inst
.instruction
= T_OPCODE_STR_SP
;
12251 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12252 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12256 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12257 if (!inst
.operands
[1].immisreg
)
12259 /* Immediate offset. */
12260 inst
.instruction
|= inst
.operands
[0].reg
;
12261 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12262 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12266 /* Register offset. */
12267 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12268 constraint (inst
.operands
[1].negative
,
12269 _("Thumb does not support this addressing mode"));
12272 switch (inst
.instruction
)
12274 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12275 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12276 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12277 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12278 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12279 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12280 case 0x5600 /* ldrsb */:
12281 case 0x5e00 /* ldrsh */: break;
12285 inst
.instruction
|= inst
.operands
[0].reg
;
12286 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12287 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12293 if (!inst
.operands
[1].present
)
12295 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12296 constraint (inst
.operands
[0].reg
== REG_LR
,
12297 _("r14 not allowed here"));
12298 constraint (inst
.operands
[0].reg
== REG_R12
,
12299 _("r12 not allowed here"));
12302 if (inst
.operands
[2].writeback
12303 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12304 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12305 as_warn (_("base register written back, and overlaps "
12306 "one of transfer registers"));
12308 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12309 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12310 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12316 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12317 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12323 unsigned Rd
, Rn
, Rm
, Ra
;
12325 Rd
= inst
.operands
[0].reg
;
12326 Rn
= inst
.operands
[1].reg
;
12327 Rm
= inst
.operands
[2].reg
;
12328 Ra
= inst
.operands
[3].reg
;
12330 reject_bad_reg (Rd
);
12331 reject_bad_reg (Rn
);
12332 reject_bad_reg (Rm
);
12333 reject_bad_reg (Ra
);
12335 inst
.instruction
|= Rd
<< 8;
12336 inst
.instruction
|= Rn
<< 16;
12337 inst
.instruction
|= Rm
;
12338 inst
.instruction
|= Ra
<< 12;
12344 unsigned RdLo
, RdHi
, Rn
, Rm
;
12346 RdLo
= inst
.operands
[0].reg
;
12347 RdHi
= inst
.operands
[1].reg
;
12348 Rn
= inst
.operands
[2].reg
;
12349 Rm
= inst
.operands
[3].reg
;
12351 reject_bad_reg (RdLo
);
12352 reject_bad_reg (RdHi
);
12353 reject_bad_reg (Rn
);
12354 reject_bad_reg (Rm
);
12356 inst
.instruction
|= RdLo
<< 12;
12357 inst
.instruction
|= RdHi
<< 8;
12358 inst
.instruction
|= Rn
<< 16;
12359 inst
.instruction
|= Rm
;
12363 do_t_mov_cmp (void)
12367 Rn
= inst
.operands
[0].reg
;
12368 Rm
= inst
.operands
[1].reg
;
12371 set_pred_insn_type_last ();
12373 if (unified_syntax
)
12375 int r0off
= (inst
.instruction
== T_MNEM_mov
12376 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12377 unsigned long opcode
;
12378 bfd_boolean narrow
;
12379 bfd_boolean low_regs
;
12381 low_regs
= (Rn
<= 7 && Rm
<= 7);
12382 opcode
= inst
.instruction
;
12383 if (in_pred_block ())
12384 narrow
= opcode
!= T_MNEM_movs
;
12386 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12387 if (inst
.size_req
== 4
12388 || inst
.operands
[1].shifted
)
12391 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12392 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12393 && !inst
.operands
[1].shifted
12397 inst
.instruction
= T2_SUBS_PC_LR
;
12401 if (opcode
== T_MNEM_cmp
)
12403 constraint (Rn
== REG_PC
, BAD_PC
);
12406 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12408 warn_deprecated_sp (Rm
);
12409 /* R15 was documented as a valid choice for Rm in ARMv6,
12410 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12411 tools reject R15, so we do too. */
12412 constraint (Rm
== REG_PC
, BAD_PC
);
12415 reject_bad_reg (Rm
);
12417 else if (opcode
== T_MNEM_mov
12418 || opcode
== T_MNEM_movs
)
12420 if (inst
.operands
[1].isreg
)
12422 if (opcode
== T_MNEM_movs
)
12424 reject_bad_reg (Rn
);
12425 reject_bad_reg (Rm
);
12429 /* This is mov.n. */
12430 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12431 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12433 as_tsktsk (_("Use of r%u as a source register is "
12434 "deprecated when r%u is the destination "
12435 "register."), Rm
, Rn
);
12440 /* This is mov.w. */
12441 constraint (Rn
== REG_PC
, BAD_PC
);
12442 constraint (Rm
== REG_PC
, BAD_PC
);
12443 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12444 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12448 reject_bad_reg (Rn
);
12451 if (!inst
.operands
[1].isreg
)
12453 /* Immediate operand. */
12454 if (!in_pred_block () && opcode
== T_MNEM_mov
)
12456 if (low_regs
&& narrow
)
12458 inst
.instruction
= THUMB_OP16 (opcode
);
12459 inst
.instruction
|= Rn
<< 8;
12460 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12461 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12463 if (inst
.size_req
== 2)
12464 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12466 inst
.relax
= opcode
;
12471 constraint ((inst
.relocs
[0].type
12472 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12473 && (inst
.relocs
[0].type
12474 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12475 THUMB1_RELOC_ONLY
);
12477 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12478 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12479 inst
.instruction
|= Rn
<< r0off
;
12480 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12483 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12484 && (inst
.instruction
== T_MNEM_mov
12485 || inst
.instruction
== T_MNEM_movs
))
12487 /* Register shifts are encoded as separate shift instructions. */
12488 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12490 if (in_pred_block ())
12495 if (inst
.size_req
== 4)
12498 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12504 switch (inst
.operands
[1].shift_kind
)
12507 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12510 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12513 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12516 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12522 inst
.instruction
= opcode
;
12525 inst
.instruction
|= Rn
;
12526 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12531 inst
.instruction
|= CONDS_BIT
;
12533 inst
.instruction
|= Rn
<< 8;
12534 inst
.instruction
|= Rm
<< 16;
12535 inst
.instruction
|= inst
.operands
[1].imm
;
12540 /* Some mov with immediate shift have narrow variants.
12541 Register shifts are handled above. */
12542 if (low_regs
&& inst
.operands
[1].shifted
12543 && (inst
.instruction
== T_MNEM_mov
12544 || inst
.instruction
== T_MNEM_movs
))
12546 if (in_pred_block ())
12547 narrow
= (inst
.instruction
== T_MNEM_mov
);
12549 narrow
= (inst
.instruction
== T_MNEM_movs
);
12554 switch (inst
.operands
[1].shift_kind
)
12556 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12557 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12558 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12559 default: narrow
= FALSE
; break;
12565 inst
.instruction
|= Rn
;
12566 inst
.instruction
|= Rm
<< 3;
12567 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12571 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12572 inst
.instruction
|= Rn
<< r0off
;
12573 encode_thumb32_shifted_operand (1);
12577 switch (inst
.instruction
)
12580 /* In v4t or v5t a move of two lowregs produces unpredictable
12581 results. Don't allow this. */
12584 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12585 "MOV Rd, Rs with two low registers is not "
12586 "permitted on this architecture");
12587 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12591 inst
.instruction
= T_OPCODE_MOV_HR
;
12592 inst
.instruction
|= (Rn
& 0x8) << 4;
12593 inst
.instruction
|= (Rn
& 0x7);
12594 inst
.instruction
|= Rm
<< 3;
12598 /* We know we have low registers at this point.
12599 Generate LSLS Rd, Rs, #0. */
12600 inst
.instruction
= T_OPCODE_LSL_I
;
12601 inst
.instruction
|= Rn
;
12602 inst
.instruction
|= Rm
<< 3;
12608 inst
.instruction
= T_OPCODE_CMP_LR
;
12609 inst
.instruction
|= Rn
;
12610 inst
.instruction
|= Rm
<< 3;
12614 inst
.instruction
= T_OPCODE_CMP_HR
;
12615 inst
.instruction
|= (Rn
& 0x8) << 4;
12616 inst
.instruction
|= (Rn
& 0x7);
12617 inst
.instruction
|= Rm
<< 3;
12624 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12626 /* PR 10443: Do not silently ignore shifted operands. */
12627 constraint (inst
.operands
[1].shifted
,
12628 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12630 if (inst
.operands
[1].isreg
)
12632 if (Rn
< 8 && Rm
< 8)
12634 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12635 since a MOV instruction produces unpredictable results. */
12636 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12637 inst
.instruction
= T_OPCODE_ADD_I3
;
12639 inst
.instruction
= T_OPCODE_CMP_LR
;
12641 inst
.instruction
|= Rn
;
12642 inst
.instruction
|= Rm
<< 3;
12646 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12647 inst
.instruction
= T_OPCODE_MOV_HR
;
12649 inst
.instruction
= T_OPCODE_CMP_HR
;
12655 constraint (Rn
> 7,
12656 _("only lo regs allowed with immediate"));
12657 inst
.instruction
|= Rn
<< 8;
12658 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12669 top
= (inst
.instruction
& 0x00800000) != 0;
12670 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12672 constraint (top
, _(":lower16: not allowed in this instruction"));
12673 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12675 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12677 constraint (!top
, _(":upper16: not allowed in this instruction"));
12678 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12681 Rd
= inst
.operands
[0].reg
;
12682 reject_bad_reg (Rd
);
12684 inst
.instruction
|= Rd
<< 8;
12685 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12687 imm
= inst
.relocs
[0].exp
.X_add_number
;
12688 inst
.instruction
|= (imm
& 0xf000) << 4;
12689 inst
.instruction
|= (imm
& 0x0800) << 15;
12690 inst
.instruction
|= (imm
& 0x0700) << 4;
12691 inst
.instruction
|= (imm
& 0x00ff);
12696 do_t_mvn_tst (void)
12700 Rn
= inst
.operands
[0].reg
;
12701 Rm
= inst
.operands
[1].reg
;
12703 if (inst
.instruction
== T_MNEM_cmp
12704 || inst
.instruction
== T_MNEM_cmn
)
12705 constraint (Rn
== REG_PC
, BAD_PC
);
12707 reject_bad_reg (Rn
);
12708 reject_bad_reg (Rm
);
12710 if (unified_syntax
)
12712 int r0off
= (inst
.instruction
== T_MNEM_mvn
12713 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12714 bfd_boolean narrow
;
12716 if (inst
.size_req
== 4
12717 || inst
.instruction
> 0xffff
12718 || inst
.operands
[1].shifted
12719 || Rn
> 7 || Rm
> 7)
12721 else if (inst
.instruction
== T_MNEM_cmn
12722 || inst
.instruction
== T_MNEM_tst
)
12724 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12725 narrow
= !in_pred_block ();
12727 narrow
= in_pred_block ();
12729 if (!inst
.operands
[1].isreg
)
12731 /* For an immediate, we always generate a 32-bit opcode;
12732 section relaxation will shrink it later if possible. */
12733 if (inst
.instruction
< 0xffff)
12734 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12735 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12736 inst
.instruction
|= Rn
<< r0off
;
12737 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12741 /* See if we can do this with a 16-bit instruction. */
12744 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12745 inst
.instruction
|= Rn
;
12746 inst
.instruction
|= Rm
<< 3;
12750 constraint (inst
.operands
[1].shifted
12751 && inst
.operands
[1].immisreg
,
12752 _("shift must be constant"));
12753 if (inst
.instruction
< 0xffff)
12754 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12755 inst
.instruction
|= Rn
<< r0off
;
12756 encode_thumb32_shifted_operand (1);
12762 constraint (inst
.instruction
> 0xffff
12763 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12764 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12765 _("unshifted register required"));
12766 constraint (Rn
> 7 || Rm
> 7,
12769 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12770 inst
.instruction
|= Rn
;
12771 inst
.instruction
|= Rm
<< 3;
12780 if (do_vfp_nsyn_mrs () == SUCCESS
)
12783 Rd
= inst
.operands
[0].reg
;
12784 reject_bad_reg (Rd
);
12785 inst
.instruction
|= Rd
<< 8;
12787 if (inst
.operands
[1].isreg
)
12789 unsigned br
= inst
.operands
[1].reg
;
12790 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12791 as_bad (_("bad register for mrs"));
12793 inst
.instruction
|= br
& (0xf << 16);
12794 inst
.instruction
|= (br
& 0x300) >> 4;
12795 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12799 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12801 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12803 /* PR gas/12698: The constraint is only applied for m_profile.
12804 If the user has specified -march=all, we want to ignore it as
12805 we are building for any CPU type, including non-m variants. */
12806 bfd_boolean m_profile
=
12807 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12808 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12809 "not support requested special purpose register"));
12812 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12814 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12815 _("'APSR', 'CPSR' or 'SPSR' expected"));
12817 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12818 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12819 inst
.instruction
|= 0xf0000;
12829 if (do_vfp_nsyn_msr () == SUCCESS
)
12832 constraint (!inst
.operands
[1].isreg
,
12833 _("Thumb encoding does not support an immediate here"));
12835 if (inst
.operands
[0].isreg
)
12836 flags
= (int)(inst
.operands
[0].reg
);
12838 flags
= inst
.operands
[0].imm
;
12840 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12842 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12844 /* PR gas/12698: The constraint is only applied for m_profile.
12845 If the user has specified -march=all, we want to ignore it as
12846 we are building for any CPU type, including non-m variants. */
12847 bfd_boolean m_profile
=
12848 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12849 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12850 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12851 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12852 && bits
!= PSR_f
)) && m_profile
,
12853 _("selected processor does not support requested special "
12854 "purpose register"));
12857 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12858 "requested special purpose register"));
12860 Rn
= inst
.operands
[1].reg
;
12861 reject_bad_reg (Rn
);
12863 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12864 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12865 inst
.instruction
|= (flags
& 0x300) >> 4;
12866 inst
.instruction
|= (flags
& 0xff);
12867 inst
.instruction
|= Rn
<< 16;
12873 bfd_boolean narrow
;
12874 unsigned Rd
, Rn
, Rm
;
12876 if (!inst
.operands
[2].present
)
12877 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12879 Rd
= inst
.operands
[0].reg
;
12880 Rn
= inst
.operands
[1].reg
;
12881 Rm
= inst
.operands
[2].reg
;
12883 if (unified_syntax
)
12885 if (inst
.size_req
== 4
12891 else if (inst
.instruction
== T_MNEM_muls
)
12892 narrow
= !in_pred_block ();
12894 narrow
= in_pred_block ();
12898 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12899 constraint (Rn
> 7 || Rm
> 7,
12906 /* 16-bit MULS/Conditional MUL. */
12907 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12908 inst
.instruction
|= Rd
;
12911 inst
.instruction
|= Rm
<< 3;
12913 inst
.instruction
|= Rn
<< 3;
12915 constraint (1, _("dest must overlap one source register"));
12919 constraint (inst
.instruction
!= T_MNEM_mul
,
12920 _("Thumb-2 MUL must not set flags"));
12922 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12923 inst
.instruction
|= Rd
<< 8;
12924 inst
.instruction
|= Rn
<< 16;
12925 inst
.instruction
|= Rm
<< 0;
12927 reject_bad_reg (Rd
);
12928 reject_bad_reg (Rn
);
12929 reject_bad_reg (Rm
);
12936 unsigned RdLo
, RdHi
, Rn
, Rm
;
12938 RdLo
= inst
.operands
[0].reg
;
12939 RdHi
= inst
.operands
[1].reg
;
12940 Rn
= inst
.operands
[2].reg
;
12941 Rm
= inst
.operands
[3].reg
;
12943 reject_bad_reg (RdLo
);
12944 reject_bad_reg (RdHi
);
12945 reject_bad_reg (Rn
);
12946 reject_bad_reg (Rm
);
12948 inst
.instruction
|= RdLo
<< 12;
12949 inst
.instruction
|= RdHi
<< 8;
12950 inst
.instruction
|= Rn
<< 16;
12951 inst
.instruction
|= Rm
;
12954 as_tsktsk (_("rdhi and rdlo must be different"));
12960 set_pred_insn_type (NEUTRAL_IT_INSN
);
12962 if (unified_syntax
)
12964 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12966 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12967 inst
.instruction
|= inst
.operands
[0].imm
;
12971 /* PR9722: Check for Thumb2 availability before
12972 generating a thumb2 nop instruction. */
12973 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12975 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12976 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12979 inst
.instruction
= 0x46c0;
12984 constraint (inst
.operands
[0].present
,
12985 _("Thumb does not support NOP with hints"));
12986 inst
.instruction
= 0x46c0;
12993 if (unified_syntax
)
12995 bfd_boolean narrow
;
12997 if (THUMB_SETS_FLAGS (inst
.instruction
))
12998 narrow
= !in_pred_block ();
13000 narrow
= in_pred_block ();
13001 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13003 if (inst
.size_req
== 4)
13008 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13009 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13010 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13014 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13015 inst
.instruction
|= inst
.operands
[0].reg
;
13016 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13021 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
13023 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13025 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13026 inst
.instruction
|= inst
.operands
[0].reg
;
13027 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13036 Rd
= inst
.operands
[0].reg
;
13037 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
13039 reject_bad_reg (Rd
);
13040 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
13041 reject_bad_reg (Rn
);
13043 inst
.instruction
|= Rd
<< 8;
13044 inst
.instruction
|= Rn
<< 16;
13046 if (!inst
.operands
[2].isreg
)
13048 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13049 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13055 Rm
= inst
.operands
[2].reg
;
13056 reject_bad_reg (Rm
);
13058 constraint (inst
.operands
[2].shifted
13059 && inst
.operands
[2].immisreg
,
13060 _("shift must be constant"));
13061 encode_thumb32_shifted_operand (2);
13068 unsigned Rd
, Rn
, Rm
;
13070 Rd
= inst
.operands
[0].reg
;
13071 Rn
= inst
.operands
[1].reg
;
13072 Rm
= inst
.operands
[2].reg
;
13074 reject_bad_reg (Rd
);
13075 reject_bad_reg (Rn
);
13076 reject_bad_reg (Rm
);
13078 inst
.instruction
|= Rd
<< 8;
13079 inst
.instruction
|= Rn
<< 16;
13080 inst
.instruction
|= Rm
;
13081 if (inst
.operands
[3].present
)
13083 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
13084 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13085 _("expression too complex"));
13086 inst
.instruction
|= (val
& 0x1c) << 10;
13087 inst
.instruction
|= (val
& 0x03) << 6;
13094 if (!inst
.operands
[3].present
)
13098 inst
.instruction
&= ~0x00000020;
13100 /* PR 10168. Swap the Rm and Rn registers. */
13101 Rtmp
= inst
.operands
[1].reg
;
13102 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
13103 inst
.operands
[2].reg
= Rtmp
;
13111 if (inst
.operands
[0].immisreg
)
13112 reject_bad_reg (inst
.operands
[0].imm
);
13114 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
13118 do_t_push_pop (void)
13122 constraint (inst
.operands
[0].writeback
,
13123 _("push/pop do not support {reglist}^"));
13124 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
13125 _("expression too complex"));
13127 mask
= inst
.operands
[0].imm
;
13128 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
13129 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
13130 else if (inst
.size_req
!= 4
13131 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
13132 ? REG_LR
: REG_PC
)))
13134 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13135 inst
.instruction
|= THUMB_PP_PC_LR
;
13136 inst
.instruction
|= mask
& 0xff;
13138 else if (unified_syntax
)
13140 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13141 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
13145 inst
.error
= _("invalid register list to push/pop instruction");
13153 if (unified_syntax
)
13154 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
13157 inst
.error
= _("invalid register list to push/pop instruction");
13163 do_t_vscclrm (void)
13165 if (inst
.operands
[0].issingle
)
13167 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
13168 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
13169 inst
.instruction
|= inst
.operands
[0].imm
;
13173 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
13174 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
13175 inst
.instruction
|= 1 << 8;
13176 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
13185 Rd
= inst
.operands
[0].reg
;
13186 Rm
= inst
.operands
[1].reg
;
13188 reject_bad_reg (Rd
);
13189 reject_bad_reg (Rm
);
13191 inst
.instruction
|= Rd
<< 8;
13192 inst
.instruction
|= Rm
<< 16;
13193 inst
.instruction
|= Rm
;
13201 Rd
= inst
.operands
[0].reg
;
13202 Rm
= inst
.operands
[1].reg
;
13204 reject_bad_reg (Rd
);
13205 reject_bad_reg (Rm
);
13207 if (Rd
<= 7 && Rm
<= 7
13208 && inst
.size_req
!= 4)
13210 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13211 inst
.instruction
|= Rd
;
13212 inst
.instruction
|= Rm
<< 3;
13214 else if (unified_syntax
)
13216 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13217 inst
.instruction
|= Rd
<< 8;
13218 inst
.instruction
|= Rm
<< 16;
13219 inst
.instruction
|= Rm
;
13222 inst
.error
= BAD_HIREG
;
13230 Rd
= inst
.operands
[0].reg
;
13231 Rm
= inst
.operands
[1].reg
;
13233 reject_bad_reg (Rd
);
13234 reject_bad_reg (Rm
);
13236 inst
.instruction
|= Rd
<< 8;
13237 inst
.instruction
|= Rm
;
13245 Rd
= inst
.operands
[0].reg
;
13246 Rs
= (inst
.operands
[1].present
13247 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13248 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13250 reject_bad_reg (Rd
);
13251 reject_bad_reg (Rs
);
13252 if (inst
.operands
[2].isreg
)
13253 reject_bad_reg (inst
.operands
[2].reg
);
13255 inst
.instruction
|= Rd
<< 8;
13256 inst
.instruction
|= Rs
<< 16;
13257 if (!inst
.operands
[2].isreg
)
13259 bfd_boolean narrow
;
13261 if ((inst
.instruction
& 0x00100000) != 0)
13262 narrow
= !in_pred_block ();
13264 narrow
= in_pred_block ();
13266 if (Rd
> 7 || Rs
> 7)
13269 if (inst
.size_req
== 4 || !unified_syntax
)
13272 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13273 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13276 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13277 relaxation, but it doesn't seem worth the hassle. */
13280 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13281 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13282 inst
.instruction
|= Rs
<< 3;
13283 inst
.instruction
|= Rd
;
13287 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13288 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13292 encode_thumb32_shifted_operand (2);
13298 if (warn_on_deprecated
13299 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13300 as_tsktsk (_("setend use is deprecated for ARMv8"));
13302 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13303 if (inst
.operands
[0].imm
)
13304 inst
.instruction
|= 0x8;
13310 if (!inst
.operands
[1].present
)
13311 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13313 if (unified_syntax
)
13315 bfd_boolean narrow
;
13318 switch (inst
.instruction
)
13321 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13323 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13325 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13327 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13331 if (THUMB_SETS_FLAGS (inst
.instruction
))
13332 narrow
= !in_pred_block ();
13334 narrow
= in_pred_block ();
13335 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13337 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13339 if (inst
.operands
[2].isreg
13340 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13341 || inst
.operands
[2].reg
> 7))
13343 if (inst
.size_req
== 4)
13346 reject_bad_reg (inst
.operands
[0].reg
);
13347 reject_bad_reg (inst
.operands
[1].reg
);
13351 if (inst
.operands
[2].isreg
)
13353 reject_bad_reg (inst
.operands
[2].reg
);
13354 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13355 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13356 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13357 inst
.instruction
|= inst
.operands
[2].reg
;
13359 /* PR 12854: Error on extraneous shifts. */
13360 constraint (inst
.operands
[2].shifted
,
13361 _("extraneous shift as part of operand to shift insn"));
13365 inst
.operands
[1].shifted
= 1;
13366 inst
.operands
[1].shift_kind
= shift_kind
;
13367 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13368 ? T_MNEM_movs
: T_MNEM_mov
);
13369 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13370 encode_thumb32_shifted_operand (1);
13371 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13372 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13377 if (inst
.operands
[2].isreg
)
13379 switch (shift_kind
)
13381 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13382 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13383 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13384 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13388 inst
.instruction
|= inst
.operands
[0].reg
;
13389 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13391 /* PR 12854: Error on extraneous shifts. */
13392 constraint (inst
.operands
[2].shifted
,
13393 _("extraneous shift as part of operand to shift insn"));
13397 switch (shift_kind
)
13399 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13400 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13401 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13404 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13405 inst
.instruction
|= inst
.operands
[0].reg
;
13406 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13412 constraint (inst
.operands
[0].reg
> 7
13413 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13414 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13416 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13418 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13419 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13420 _("source1 and dest must be same register"));
13422 switch (inst
.instruction
)
13424 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13425 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13426 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13427 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13431 inst
.instruction
|= inst
.operands
[0].reg
;
13432 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13434 /* PR 12854: Error on extraneous shifts. */
13435 constraint (inst
.operands
[2].shifted
,
13436 _("extraneous shift as part of operand to shift insn"));
13440 switch (inst
.instruction
)
13442 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13443 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13444 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13445 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13448 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13449 inst
.instruction
|= inst
.operands
[0].reg
;
13450 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13458 unsigned Rd
, Rn
, Rm
;
13460 Rd
= inst
.operands
[0].reg
;
13461 Rn
= inst
.operands
[1].reg
;
13462 Rm
= inst
.operands
[2].reg
;
13464 reject_bad_reg (Rd
);
13465 reject_bad_reg (Rn
);
13466 reject_bad_reg (Rm
);
13468 inst
.instruction
|= Rd
<< 8;
13469 inst
.instruction
|= Rn
<< 16;
13470 inst
.instruction
|= Rm
;
13476 unsigned Rd
, Rn
, Rm
;
13478 Rd
= inst
.operands
[0].reg
;
13479 Rm
= inst
.operands
[1].reg
;
13480 Rn
= inst
.operands
[2].reg
;
13482 reject_bad_reg (Rd
);
13483 reject_bad_reg (Rn
);
13484 reject_bad_reg (Rm
);
13486 inst
.instruction
|= Rd
<< 8;
13487 inst
.instruction
|= Rn
<< 16;
13488 inst
.instruction
|= Rm
;
13494 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13495 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13496 _("SMC is not permitted on this architecture"));
13497 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13498 _("expression too complex"));
13499 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13500 inst
.instruction
|= (value
& 0xf000) >> 12;
13501 inst
.instruction
|= (value
& 0x0ff0);
13502 inst
.instruction
|= (value
& 0x000f) << 16;
13503 /* PR gas/15623: SMC instructions must be last in an IT block. */
13504 set_pred_insn_type_last ();
13510 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13512 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13513 inst
.instruction
|= (value
& 0x0fff);
13514 inst
.instruction
|= (value
& 0xf000) << 4;
13518 do_t_ssat_usat (int bias
)
13522 Rd
= inst
.operands
[0].reg
;
13523 Rn
= inst
.operands
[2].reg
;
13525 reject_bad_reg (Rd
);
13526 reject_bad_reg (Rn
);
13528 inst
.instruction
|= Rd
<< 8;
13529 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13530 inst
.instruction
|= Rn
<< 16;
13532 if (inst
.operands
[3].present
)
13534 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13536 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13538 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13539 _("expression too complex"));
13541 if (shift_amount
!= 0)
13543 constraint (shift_amount
> 31,
13544 _("shift expression is too large"));
13546 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13547 inst
.instruction
|= 0x00200000; /* sh bit. */
13549 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13550 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Thumb SSAT: signed saturate; encoded immediate is sat_imm - 1.  */

static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13566 Rd
= inst
.operands
[0].reg
;
13567 Rn
= inst
.operands
[2].reg
;
13569 reject_bad_reg (Rd
);
13570 reject_bad_reg (Rn
);
13572 inst
.instruction
|= Rd
<< 8;
13573 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13574 inst
.instruction
|= Rn
<< 16;
13580 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13581 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13582 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13583 || inst
.operands
[2].negative
,
13586 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13588 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13589 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13590 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13591 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13597 if (!inst
.operands
[2].present
)
13598 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13600 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13601 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13602 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13605 inst
.instruction
|= inst
.operands
[0].reg
;
13606 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13607 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13608 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13614 unsigned Rd
, Rn
, Rm
;
13616 Rd
= inst
.operands
[0].reg
;
13617 Rn
= inst
.operands
[1].reg
;
13618 Rm
= inst
.operands
[2].reg
;
13620 reject_bad_reg (Rd
);
13621 reject_bad_reg (Rn
);
13622 reject_bad_reg (Rm
);
13624 inst
.instruction
|= Rd
<< 8;
13625 inst
.instruction
|= Rn
<< 16;
13626 inst
.instruction
|= Rm
;
13627 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13635 Rd
= inst
.operands
[0].reg
;
13636 Rm
= inst
.operands
[1].reg
;
13638 reject_bad_reg (Rd
);
13639 reject_bad_reg (Rm
);
13641 if (inst
.instruction
<= 0xffff
13642 && inst
.size_req
!= 4
13643 && Rd
<= 7 && Rm
<= 7
13644 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13646 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13647 inst
.instruction
|= Rd
;
13648 inst
.instruction
|= Rm
<< 3;
13650 else if (unified_syntax
)
13652 if (inst
.instruction
<= 0xffff)
13653 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13654 inst
.instruction
|= Rd
<< 8;
13655 inst
.instruction
|= Rm
;
13656 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13660 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13661 _("Thumb encoding does not support rotation"));
13662 constraint (1, BAD_HIREG
);
13669 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13678 half
= (inst
.instruction
& 0x10) != 0;
13679 set_pred_insn_type_last ();
13680 constraint (inst
.operands
[0].immisreg
,
13681 _("instruction requires register index"));
13683 Rn
= inst
.operands
[0].reg
;
13684 Rm
= inst
.operands
[0].imm
;
13686 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13687 constraint (Rn
== REG_SP
, BAD_SP
);
13688 reject_bad_reg (Rm
);
13690 constraint (!half
&& inst
.operands
[0].shifted
,
13691 _("instruction does not allow shifted index"));
13692 inst
.instruction
|= (Rn
<< 16) | Rm
;
13698 if (!inst
.operands
[0].present
)
13699 inst
.operands
[0].imm
= 0;
13701 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13703 constraint (inst
.size_req
== 2,
13704 _("immediate value out of range"));
13705 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13706 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13707 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13711 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13712 inst
.instruction
|= inst
.operands
[0].imm
;
13715 set_pred_insn_type (NEUTRAL_IT_INSN
);
/* Thumb USAT: unsigned saturate; the immediate is encoded unbiased.  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13730 Rd
= inst
.operands
[0].reg
;
13731 Rn
= inst
.operands
[2].reg
;
13733 reject_bad_reg (Rd
);
13734 reject_bad_reg (Rn
);
13736 inst
.instruction
|= Rd
<< 8;
13737 inst
.instruction
|= inst
.operands
[1].imm
;
13738 inst
.instruction
|= Rn
<< 16;
13741 /* Checking the range of the branch offset (VAL) with NBITS bits
13742 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13744 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13746 gas_assert (nbits
> 0 && nbits
<= 32);
13749 int cmp
= (1 << (nbits
- 1));
13750 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13755 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13761 /* For branches in Armv8.1-M Mainline. */
13763 do_t_branch_future (void)
13765 unsigned long insn
= inst
.instruction
;
13767 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13768 if (inst
.operands
[0].hasreloc
== 0)
13770 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13771 as_bad (BAD_BRANCH_OFF
);
13773 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13777 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13778 inst
.relocs
[0].pc_rel
= 1;
13784 if (inst
.operands
[1].hasreloc
== 0)
13786 int val
= inst
.operands
[1].imm
;
13787 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13788 as_bad (BAD_BRANCH_OFF
);
13790 int immA
= (val
& 0x0001f000) >> 12;
13791 int immB
= (val
& 0x00000ffc) >> 2;
13792 int immC
= (val
& 0x00000002) >> 1;
13793 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13797 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13798 inst
.relocs
[1].pc_rel
= 1;
13803 if (inst
.operands
[1].hasreloc
== 0)
13805 int val
= inst
.operands
[1].imm
;
13806 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13807 as_bad (BAD_BRANCH_OFF
);
13809 int immA
= (val
& 0x0007f000) >> 12;
13810 int immB
= (val
& 0x00000ffc) >> 2;
13811 int immC
= (val
& 0x00000002) >> 1;
13812 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13816 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13817 inst
.relocs
[1].pc_rel
= 1;
13821 case T_MNEM_bfcsel
:
13823 if (inst
.operands
[1].hasreloc
== 0)
13825 int val
= inst
.operands
[1].imm
;
13826 int immA
= (val
& 0x00001000) >> 12;
13827 int immB
= (val
& 0x00000ffc) >> 2;
13828 int immC
= (val
& 0x00000002) >> 1;
13829 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13833 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13834 inst
.relocs
[1].pc_rel
= 1;
13838 if (inst
.operands
[2].hasreloc
== 0)
13840 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13841 int val2
= inst
.operands
[2].imm
;
13842 int val0
= inst
.operands
[0].imm
& 0x1f;
13843 int diff
= val2
- val0
;
13845 inst
.instruction
|= 1 << 17; /* T bit. */
13846 else if (diff
!= 2)
13847 as_bad (_("out of range label-relative fixup value"));
13851 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13852 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13853 inst
.relocs
[2].pc_rel
= 1;
13857 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13858 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13863 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13870 /* Helper function for do_t_loloop to handle relocations. */
13872 v8_1_loop_reloc (int is_le
)
13874 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13876 int value
= inst
.relocs
[0].exp
.X_add_number
;
13877 value
= (is_le
) ? -value
: value
;
13879 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13880 as_bad (BAD_BRANCH_OFF
);
13884 immh
= (value
& 0x00000ffc) >> 2;
13885 imml
= (value
& 0x00000002) >> 1;
13887 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13891 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13892 inst
.relocs
[0].pc_rel
= 1;
13896 /* To handle the Scalar Low Overhead Loop instructions
13897 in Armv8.1-M Mainline. */
13901 unsigned long insn
= inst
.instruction
;
13903 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13904 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13910 if (!inst
.operands
[0].present
)
13911 inst
.instruction
|= 1 << 21;
13913 v8_1_loop_reloc (TRUE
);
13917 v8_1_loop_reloc (FALSE
);
13918 /* Fall through. */
13920 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13921 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
/* MVE instruction encoder helpers.  Base opcode values for the MVE
   mnemonics that are not table-driven.  */
#define M_MNEM_vabav	0xee800f01
#define M_MNEM_vmladav	  0xeef00e00
#define M_MNEM_vmladava	  0xeef00e20
#define M_MNEM_vmladavx	  0xeef01e00
#define M_MNEM_vmladavax  0xeef01e20
#define M_MNEM_vmlsdav	  0xeef00e01
#define M_MNEM_vmlsdava	  0xeef00e21
#define M_MNEM_vmlsdavx	  0xeef01e01
#define M_MNEM_vmlsdavax  0xeef01e21
#define M_MNEM_vmullt	0xee011e00
#define M_MNEM_vmullb	0xee010e00
#define M_MNEM_vst20	0xfc801e00
#define M_MNEM_vst21	0xfc801e20
#define M_MNEM_vst40	0xfc801e01
#define M_MNEM_vst41	0xfc801e21
#define M_MNEM_vst42	0xfc801e41
#define M_MNEM_vst43	0xfc801e61
#define M_MNEM_vld20	0xfc901e00
#define M_MNEM_vld21	0xfc901e20
#define M_MNEM_vld40	0xfc901e01
#define M_MNEM_vld41	0xfc901e21
#define M_MNEM_vld42	0xfc901e41
#define M_MNEM_vld43	0xfc901e61
#define M_MNEM_vstrb	0xec000e00
#define M_MNEM_vstrh	0xec000e10
#define M_MNEM_vstrw	0xec000e40
#define M_MNEM_vstrd	0xec000e50
#define M_MNEM_vldrb	0xec100e00
#define M_MNEM_vldrh	0xec100e10
#define M_MNEM_vldrw	0xec100e40
#define M_MNEM_vldrd	0xec100e50
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vabdl,	0x0800700, N_INV,     N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vaddl,	0x0800000, N_INV,     N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vsubl,	0x0800200, N_INV,     N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
14058 #define X(OPC,I,F,S) N_MNEM_##OPC
14063 static const struct neon_tab_entry neon_enc_tab
[] =
14065 #define X(OPC,I,F,S) { (I), (F), (S) }
/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
/* Rewrite inst.instruction using the TYPE-specific encoding from
   neon_enc_tab and mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)
/* Diagnose a Neon type suffix given on a mnemonic that was not encoded as
   a Neon instruction.  NOTE(review): the do/while wrapper and the bail-out
   were dropped by the extraction and are reconstructed — verify against
   upstream.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.  */

#define NEON_SHAPE_DEF			\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
  /* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)

#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D

#define X(N, L, C) S##N L

/* NOTE(review): the shape enumeration below was dropped by the extraction
   and is reconstructed from upstream gas — verify.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
14200 enum neon_shape_class
14209 #define X(N, L, C) SC_##C
14211 static enum neon_shape_class neon_shape_class
[] =
14230 /* Register widths of above. */
14231 static unsigned neon_shape_el_size
[] =
14243 struct neon_shape_info
14246 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
14249 #define S2(A,B) { SE_##A, SE_##B }
14250 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14251 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14253 #define X(N, L, C) { N, S##N L }
14255 static struct neon_shape_info neon_shape_tab
[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.
   NOTE(review): the N_S8..N_P64 entries were dropped by the extraction and
   are reconstructed from upstream gas — verify values.  The low modifier
   bits (N_DBL..N_SIZ) deliberately alias the low type bits; they are only
   meaningful alongside N_EQK.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};
/* Composite masks built from the neon_type_mask bits above.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
14329 /* Select a "shape" for the current instruction (describing register types or
14330 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14331 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14332 function of operand parsing, so this function doesn't need to be called.
14333 Shapes should be listed in order of decreasing length. */
14335 static enum neon_shape
14336 neon_select_shape (enum neon_shape shape
, ...)
14339 enum neon_shape first_shape
= shape
;
14341 /* Fix missing optional operands. FIXME: we don't know at this point how
14342 many arguments we should have, so this makes the assumption that we have
14343 > 1. This is true of all current Neon opcodes, I think, but may not be
14344 true in the future. */
14345 if (!inst
.operands
[1].present
)
14346 inst
.operands
[1] = inst
.operands
[0];
14348 va_start (ap
, shape
);
14350 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14355 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14357 if (!inst
.operands
[j
].present
)
14363 switch (neon_shape_tab
[shape
].el
[j
])
14365 /* If a .f16, .16, .u16, .s16 type specifier is given over
14366 a VFP single precision register operand, it's essentially
14367 means only half of the register is used.
14369 If the type specifier is given after the mnemonics, the
14370 information is stored in inst.vectype. If the type specifier
14371 is given after register operand, the information is stored
14372 in inst.operands[].vectype.
14374 When there is only one type specifier, and all the register
14375 operands are the same type of hardware register, the type
14376 specifier applies to all register operands.
14378 If no type specifier is given, the shape is inferred from
14379 operand information.
14382 vadd.f16 s0, s1, s2: NS_HHH
14383 vabs.f16 s0, s1: NS_HH
14384 vmov.f16 s0, r1: NS_HR
14385 vmov.f16 r0, s1: NS_RH
14386 vcvt.f16 r0, s1: NS_RH
14387 vcvt.f16.s32 s2, s2, #29: NS_HFI
14388 vcvt.f16.s32 s2, s2: NS_HF
14391 if (!(inst
.operands
[j
].isreg
14392 && inst
.operands
[j
].isvec
14393 && inst
.operands
[j
].issingle
14394 && !inst
.operands
[j
].isquad
14395 && ((inst
.vectype
.elems
== 1
14396 && inst
.vectype
.el
[0].size
== 16)
14397 || (inst
.vectype
.elems
> 1
14398 && inst
.vectype
.el
[j
].size
== 16)
14399 || (inst
.vectype
.elems
== 0
14400 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14401 && inst
.operands
[j
].vectype
.size
== 16))))
14406 if (!(inst
.operands
[j
].isreg
14407 && inst
.operands
[j
].isvec
14408 && inst
.operands
[j
].issingle
14409 && !inst
.operands
[j
].isquad
14410 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14411 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14412 || (inst
.vectype
.elems
== 0
14413 && (inst
.operands
[j
].vectype
.size
== 32
14414 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14419 if (!(inst
.operands
[j
].isreg
14420 && inst
.operands
[j
].isvec
14421 && !inst
.operands
[j
].isquad
14422 && !inst
.operands
[j
].issingle
))
14427 if (!(inst
.operands
[j
].isreg
14428 && !inst
.operands
[j
].isvec
))
14433 if (!(inst
.operands
[j
].isreg
14434 && inst
.operands
[j
].isvec
14435 && inst
.operands
[j
].isquad
14436 && !inst
.operands
[j
].issingle
))
14441 if (!(!inst
.operands
[j
].isreg
14442 && !inst
.operands
[j
].isscalar
))
14447 if (!(!inst
.operands
[j
].isreg
14448 && inst
.operands
[j
].isscalar
))
14458 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14459 /* We've matched all the entries in the shape table, and we don't
14460 have any left over operands which have not been matched. */
14466 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14467 first_error (_("invalid instruction shape"));
14472 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14473 means the Q bit should be set). */
14476 neon_quad (enum neon_shape shape
)
14478 return neon_shape_class
[shape
] == SC_QUAD
;
14482 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14485 /* Allow modification to be made to types which are constrained to be
14486 based on the key element, based on bits set alongside N_EQK. */
14487 if ((typebits
& N_EQK
) != 0)
14489 if ((typebits
& N_HLF
) != 0)
14491 else if ((typebits
& N_DBL
) != 0)
14493 if ((typebits
& N_SGN
) != 0)
14494 *g_type
= NT_signed
;
14495 else if ((typebits
& N_UNS
) != 0)
14496 *g_type
= NT_unsigned
;
14497 else if ((typebits
& N_INT
) != 0)
14498 *g_type
= NT_integer
;
14499 else if ((typebits
& N_FLT
) != 0)
14500 *g_type
= NT_float
;
14501 else if ((typebits
& N_SIZ
) != 0)
14502 *g_type
= NT_untyped
;
14506 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14507 operand type, i.e. the single type specified in a Neon instruction when it
14508 is the only one given. */
14510 static struct neon_type_el
14511 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14513 struct neon_type_el dest
= *key
;
14515 gas_assert ((thisarg
& N_EQK
) != 0);
14517 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14522 /* Convert Neon type and size into compact bitmask representation. */
14524 static enum neon_type_mask
14525 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14532 case 8: return N_8
;
14533 case 16: return N_16
;
14534 case 32: return N_32
;
14535 case 64: return N_64
;
14543 case 8: return N_I8
;
14544 case 16: return N_I16
;
14545 case 32: return N_I32
;
14546 case 64: return N_I64
;
14554 case 16: return N_F16
;
14555 case 32: return N_F32
;
14556 case 64: return N_F64
;
14564 case 8: return N_P8
;
14565 case 16: return N_P16
;
14566 case 64: return N_P64
;
14574 case 8: return N_S8
;
14575 case 16: return N_S16
;
14576 case 32: return N_S32
;
14577 case 64: return N_S64
;
14585 case 8: return N_U8
;
14586 case 16: return N_U16
;
14587 case 32: return N_U32
;
14588 case 64: return N_U64
;
14599 /* Convert compact Neon bitmask type representation to a type and size. Only
14600 handles the case where a single bit is set in the mask. */
14603 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14604 enum neon_type_mask mask
)
14606 if ((mask
& N_EQK
) != 0)
14609 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14611 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14613 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14615 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14620 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14622 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14623 *type
= NT_unsigned
;
14624 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14625 *type
= NT_integer
;
14626 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14627 *type
= NT_untyped
;
14628 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14630 else if ((mask
& (N_F_ALL
)) != 0)
14638 /* Modify a bitmask of allowed types. This is only needed for type
14642 modify_types_allowed (unsigned allowed
, unsigned mods
)
14645 enum neon_el_type type
;
14651 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14653 if (el_type_of_type_chk (&type
, &size
,
14654 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14656 neon_modify_type_size (mods
, &type
, &size
);
14657 destmask
|= type_chk_of_el_type (type
, size
);
14664 /* Check type and return type classification.
14665 The manual states (paraphrase): If one datatype is given, it indicates the
14667 - the second operand, if there is one
14668 - the operand, if there is no second operand
14669 - the result, if there are no operands.
14670 This isn't quite good enough though, so we use a concept of a "key" datatype
14671 which is set on a per-instruction basis, which is the one which matters when
14672 only one data type is written.
14673 Note: this function has side-effects (e.g. filling in missing operands). All
14674 Neon instructions should call it before performing bit encoding. */
14676 static struct neon_type_el
14677 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14680 unsigned i
, pass
, key_el
= 0;
14681 unsigned types
[NEON_MAX_TYPE_ELS
];
14682 enum neon_el_type k_type
= NT_invtype
;
14683 unsigned k_size
= -1u;
14684 struct neon_type_el badtype
= {NT_invtype
, -1};
14685 unsigned key_allowed
= 0;
14687 /* Optional registers in Neon instructions are always (not) in operand 1.
14688 Fill in the missing operand here, if it was omitted. */
14689 if (els
> 1 && !inst
.operands
[1].present
)
14690 inst
.operands
[1] = inst
.operands
[0];
14692 /* Suck up all the varargs. */
14694 for (i
= 0; i
< els
; i
++)
14696 unsigned thisarg
= va_arg (ap
, unsigned);
14697 if (thisarg
== N_IGNORE_TYPE
)
14702 types
[i
] = thisarg
;
14703 if ((thisarg
& N_KEY
) != 0)
14708 if (inst
.vectype
.elems
> 0)
14709 for (i
= 0; i
< els
; i
++)
14710 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14712 first_error (_("types specified in both the mnemonic and operands"));
14716 /* Duplicate inst.vectype elements here as necessary.
14717 FIXME: No idea if this is exactly the same as the ARM assembler,
14718 particularly when an insn takes one register and one non-register
14720 if (inst
.vectype
.elems
== 1 && els
> 1)
14723 inst
.vectype
.elems
= els
;
14724 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14725 for (j
= 0; j
< els
; j
++)
14727 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14730 else if (inst
.vectype
.elems
== 0 && els
> 0)
14733 /* No types were given after the mnemonic, so look for types specified
14734 after each operand. We allow some flexibility here; as long as the
14735 "key" operand has a type, we can infer the others. */
14736 for (j
= 0; j
< els
; j
++)
14737 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14738 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14740 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14742 for (j
= 0; j
< els
; j
++)
14743 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14744 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14749 first_error (_("operand types can't be inferred"));
14753 else if (inst
.vectype
.elems
!= els
)
14755 first_error (_("type specifier has the wrong number of parts"));
14759 for (pass
= 0; pass
< 2; pass
++)
14761 for (i
= 0; i
< els
; i
++)
14763 unsigned thisarg
= types
[i
];
14764 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14765 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14766 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14767 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14769 /* Decay more-specific signed & unsigned types to sign-insensitive
14770 integer types if sign-specific variants are unavailable. */
14771 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14772 && (types_allowed
& N_SU_ALL
) == 0)
14773 g_type
= NT_integer
;
14775 /* If only untyped args are allowed, decay any more specific types to
14776 them. Some instructions only care about signs for some element
14777 sizes, so handle that properly. */
14778 if (((types_allowed
& N_UNT
) == 0)
14779 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14780 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14781 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14782 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14783 g_type
= NT_untyped
;
14787 if ((thisarg
& N_KEY
) != 0)
14791 key_allowed
= thisarg
& ~N_KEY
;
14793 /* Check architecture constraint on FP16 extension. */
14795 && k_type
== NT_float
14796 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14798 inst
.error
= _(BAD_FP16
);
14805 if ((thisarg
& N_VFP
) != 0)
14807 enum neon_shape_el regshape
;
14808 unsigned regwidth
, match
;
14810 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14813 first_error (_("invalid instruction shape"));
14816 regshape
= neon_shape_tab
[ns
].el
[i
];
14817 regwidth
= neon_shape_el_size
[regshape
];
14819 /* In VFP mode, operands must match register widths. If we
14820 have a key operand, use its width, else use the width of
14821 the current operand. */
14827 /* FP16 will use a single precision register. */
14828 if (regwidth
== 32 && match
== 16)
14830 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14834 inst
.error
= _(BAD_FP16
);
14839 if (regwidth
!= match
)
14841 first_error (_("operand size must match register width"));
14846 if ((thisarg
& N_EQK
) == 0)
14848 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14850 if ((given_type
& types_allowed
) == 0)
14852 first_error (BAD_SIMD_TYPE
);
14858 enum neon_el_type mod_k_type
= k_type
;
14859 unsigned mod_k_size
= k_size
;
14860 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14861 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14863 first_error (_("inconsistent types in Neon instruction"));
14871 return inst
.vectype
.el
[key_el
];
14874 /* Neon-style VFP instruction forwarding. */
14876 /* Thumb VFP instructions have 0xE in the condition field. */
14879 do_vfp_cond_or_thumb (void)
14884 inst
.instruction
|= 0xe0000000;
14886 inst
.instruction
|= inst
.cond
<< 28;
14889 /* Look up and encode a simple mnemonic, for use as a helper function for the
14890 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14891 etc. It is assumed that operand parsing has already been done, and that the
14892 operands are in the form expected by the given opcode (this isn't necessarily
14893 the same as the form in which they were parsed, hence some massaging must
14894 take place before this function is called).
14895 Checks current arch version against that in the looked-up opcode. */
14898 do_vfp_nsyn_opcode (const char *opname
)
14900 const struct asm_opcode
*opcode
;
14902 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14907 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14908 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14915 inst
.instruction
= opcode
->tvalue
;
14916 opcode
->tencode ();
14920 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14921 opcode
->aencode ();
14926 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14928 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14930 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14933 do_vfp_nsyn_opcode ("fadds");
14935 do_vfp_nsyn_opcode ("fsubs");
14937 /* ARMv8.2 fp16 instruction. */
14939 do_scalar_fp16_v82_encode ();
14944 do_vfp_nsyn_opcode ("faddd");
14946 do_vfp_nsyn_opcode ("fsubd");
14950 /* Check operand types to see if this is a VFP instruction, and if so call
14954 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14956 enum neon_shape rs
;
14957 struct neon_type_el et
;
14962 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14963 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14967 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14968 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14969 N_F_ALL
| N_KEY
| N_VFP
);
14976 if (et
.type
!= NT_invtype
)
14987 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14989 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14991 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14994 do_vfp_nsyn_opcode ("fmacs");
14996 do_vfp_nsyn_opcode ("fnmacs");
14998 /* ARMv8.2 fp16 instruction. */
15000 do_scalar_fp16_v82_encode ();
15005 do_vfp_nsyn_opcode ("fmacd");
15007 do_vfp_nsyn_opcode ("fnmacd");
15012 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
15014 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
15016 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15019 do_vfp_nsyn_opcode ("ffmas");
15021 do_vfp_nsyn_opcode ("ffnmas");
15023 /* ARMv8.2 fp16 instruction. */
15025 do_scalar_fp16_v82_encode ();
15030 do_vfp_nsyn_opcode ("ffmad");
15032 do_vfp_nsyn_opcode ("ffnmad");
15037 do_vfp_nsyn_mul (enum neon_shape rs
)
15039 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15041 do_vfp_nsyn_opcode ("fmuls");
15043 /* ARMv8.2 fp16 instruction. */
15045 do_scalar_fp16_v82_encode ();
15048 do_vfp_nsyn_opcode ("fmuld");
15052 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
15054 int is_neg
= (inst
.instruction
& 0x80) != 0;
15055 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
15057 if (rs
== NS_FF
|| rs
== NS_HH
)
15060 do_vfp_nsyn_opcode ("fnegs");
15062 do_vfp_nsyn_opcode ("fabss");
15064 /* ARMv8.2 fp16 instruction. */
15066 do_scalar_fp16_v82_encode ();
15071 do_vfp_nsyn_opcode ("fnegd");
15073 do_vfp_nsyn_opcode ("fabsd");
15077 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15078 insns belong to Neon, and are handled elsewhere. */
15081 do_vfp_nsyn_ldm_stm (int is_dbmode
)
15083 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
15087 do_vfp_nsyn_opcode ("fldmdbs");
15089 do_vfp_nsyn_opcode ("fldmias");
15094 do_vfp_nsyn_opcode ("fstmdbs");
15096 do_vfp_nsyn_opcode ("fstmias");
15101 do_vfp_nsyn_sqrt (void)
15103 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15104 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15106 if (rs
== NS_FF
|| rs
== NS_HH
)
15108 do_vfp_nsyn_opcode ("fsqrts");
15110 /* ARMv8.2 fp16 instruction. */
15112 do_scalar_fp16_v82_encode ();
15115 do_vfp_nsyn_opcode ("fsqrtd");
15119 do_vfp_nsyn_div (void)
15121 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15122 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15123 N_F_ALL
| N_KEY
| N_VFP
);
15125 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15127 do_vfp_nsyn_opcode ("fdivs");
15129 /* ARMv8.2 fp16 instruction. */
15131 do_scalar_fp16_v82_encode ();
15134 do_vfp_nsyn_opcode ("fdivd");
15138 do_vfp_nsyn_nmul (void)
15140 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15141 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15142 N_F_ALL
| N_KEY
| N_VFP
);
15144 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15146 NEON_ENCODE (SINGLE
, inst
);
15147 do_vfp_sp_dyadic ();
15149 /* ARMv8.2 fp16 instruction. */
15151 do_scalar_fp16_v82_encode ();
15155 NEON_ENCODE (DOUBLE
, inst
);
15156 do_vfp_dp_rd_rn_rm ();
15158 do_vfp_cond_or_thumb ();
15163 do_vfp_nsyn_cmp (void)
15165 enum neon_shape rs
;
15166 if (inst
.operands
[1].isreg
)
15168 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15169 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15171 if (rs
== NS_FF
|| rs
== NS_HH
)
15173 NEON_ENCODE (SINGLE
, inst
);
15174 do_vfp_sp_monadic ();
15178 NEON_ENCODE (DOUBLE
, inst
);
15179 do_vfp_dp_rd_rm ();
15184 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
15185 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
15187 switch (inst
.instruction
& 0x0fffffff)
15190 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
15193 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
15199 if (rs
== NS_FI
|| rs
== NS_HI
)
15201 NEON_ENCODE (SINGLE
, inst
);
15202 do_vfp_sp_compare_z ();
15206 NEON_ENCODE (DOUBLE
, inst
);
15210 do_vfp_cond_or_thumb ();
15212 /* ARMv8.2 fp16 instruction. */
15213 if (rs
== NS_HI
|| rs
== NS_HH
)
15214 do_scalar_fp16_v82_encode ();
15218 nsyn_insert_sp (void)
15220 inst
.operands
[1] = inst
.operands
[0];
15221 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
15222 inst
.operands
[0].reg
= REG_SP
;
15223 inst
.operands
[0].isreg
= 1;
15224 inst
.operands
[0].writeback
= 1;
15225 inst
.operands
[0].present
= 1;
15229 do_vfp_nsyn_push (void)
15233 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15234 _("register list must contain at least 1 and at most 16 "
15237 if (inst
.operands
[1].issingle
)
15238 do_vfp_nsyn_opcode ("fstmdbs");
15240 do_vfp_nsyn_opcode ("fstmdbd");
15244 do_vfp_nsyn_pop (void)
15248 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15249 _("register list must contain at least 1 and at most 16 "
15252 if (inst
.operands
[1].issingle
)
15253 do_vfp_nsyn_opcode ("fldmias");
15255 do_vfp_nsyn_opcode ("fldmiad");
15258 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15259 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15262 neon_dp_fixup (struct arm_it
* insn
)
15264 unsigned int i
= insn
->instruction
;
15269 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15280 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3 respectively).  Requires X to be exactly one of those
   powers of two: ffs() returns the 1-based index of the lowest set bit.  */

static int
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
15292 #define LOW4(R) ((R) & 0xf)
15293 #define HI1(R) (((R) >> 4) & 1)
15296 mve_encode_qqr (int size
, int fp
)
15298 if (inst
.operands
[2].reg
== REG_SP
)
15299 as_tsktsk (MVE_BAD_SP
);
15300 else if (inst
.operands
[2].reg
== REG_PC
)
15301 as_tsktsk (MVE_BAD_PC
);
15306 if (((unsigned)inst
.instruction
) == 0xd00)
15307 inst
.instruction
= 0xee300f40;
15309 else if (((unsigned)inst
.instruction
) == 0x200d00)
15310 inst
.instruction
= 0xee301f40;
15312 /* Setting size which is 1 for F16 and 0 for F32. */
15313 inst
.instruction
|= (size
== 16) << 28;
15318 if (((unsigned)inst
.instruction
) == 0x800)
15319 inst
.instruction
= 0xee010f40;
15321 else if (((unsigned)inst
.instruction
) == 0x1000800)
15322 inst
.instruction
= 0xee011f40;
15323 /* Setting bits for size. */
15324 inst
.instruction
|= neon_logbits (size
) << 20;
15326 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15327 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15328 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15329 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15330 inst
.instruction
|= inst
.operands
[2].reg
;
15335 mve_encode_rqq (unsigned bit28
, unsigned size
)
15337 inst
.instruction
|= bit28
<< 28;
15338 inst
.instruction
|= neon_logbits (size
) << 20;
15339 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15340 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15341 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15342 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15343 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15348 mve_encode_qqq (int ubit
, int size
)
15351 inst
.instruction
|= (ubit
!= 0) << 28;
15352 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15353 inst
.instruction
|= neon_logbits (size
) << 20;
15354 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15355 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15356 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15357 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15358 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15364 /* Encode insns with bit pattern:
15366 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15367 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15369 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15370 different meaning for some instruction. */
15373 neon_three_same (int isquad
, int ubit
, int size
)
15375 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15376 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15377 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15378 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15379 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15380 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15381 inst
.instruction
|= (isquad
!= 0) << 6;
15382 inst
.instruction
|= (ubit
!= 0) << 24;
15384 inst
.instruction
|= neon_logbits (size
) << 20;
15386 neon_dp_fixup (&inst
);
15389 /* Encode instructions of the form:
15391 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15392 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15394 Don't write size if SIZE == -1. */
15397 neon_two_same (int qbit
, int ubit
, int size
)
15399 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15400 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15401 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15402 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15403 inst
.instruction
|= (qbit
!= 0) << 6;
15404 inst
.instruction
|= (ubit
!= 0) << 24;
15407 inst
.instruction
|= neon_logbits (size
) << 18;
15409 neon_dp_fixup (&inst
);
15412 /* Neon instruction encoders, in approximate order of appearance. */
15415 do_neon_dyadic_i_su (void)
15417 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15418 struct neon_type_el et
= neon_check_type (3, rs
,
15419 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15420 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15424 do_neon_dyadic_i64_su (void)
15426 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15427 struct neon_type_el et
= neon_check_type (3, rs
,
15428 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15429 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15433 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15436 unsigned size
= et
.size
>> 3;
15437 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15438 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15439 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15440 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15441 inst
.instruction
|= (isquad
!= 0) << 6;
15442 inst
.instruction
|= immbits
<< 16;
15443 inst
.instruction
|= (size
>> 3) << 7;
15444 inst
.instruction
|= (size
& 0x7) << 19;
15446 inst
.instruction
|= (uval
!= 0) << 24;
15448 neon_dp_fixup (&inst
);
15452 do_neon_shl_imm (void)
15454 if (!inst
.operands
[2].isreg
)
15456 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15457 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15458 int imm
= inst
.operands
[2].imm
;
15460 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15461 _("immediate out of range for shift"));
15462 NEON_ENCODE (IMMED
, inst
);
15463 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15467 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15468 struct neon_type_el et
= neon_check_type (3, rs
,
15469 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15472 /* VSHL/VQSHL 3-register variants have syntax such as:
15474 whereas other 3-register operations encoded by neon_three_same have
15477 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15479 tmp
= inst
.operands
[2].reg
;
15480 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15481 inst
.operands
[1].reg
= tmp
;
15482 NEON_ENCODE (INTEGER
, inst
);
15483 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15488 do_neon_qshl_imm (void)
15490 if (!inst
.operands
[2].isreg
)
15492 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15493 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15494 int imm
= inst
.operands
[2].imm
;
15496 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15497 _("immediate out of range for shift"));
15498 NEON_ENCODE (IMMED
, inst
);
15499 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15503 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15504 struct neon_type_el et
= neon_check_type (3, rs
,
15505 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15508 /* See note in do_neon_shl_imm. */
15509 tmp
= inst
.operands
[2].reg
;
15510 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15511 inst
.operands
[1].reg
= tmp
;
15512 NEON_ENCODE (INTEGER
, inst
);
15513 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15518 do_neon_rshl (void)
15520 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15521 struct neon_type_el et
= neon_check_type (3, rs
,
15522 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15525 tmp
= inst
.operands
[2].reg
;
15526 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15527 inst
.operands
[1].reg
= tmp
;
15528 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15532 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15534 /* Handle .I8 pseudo-instructions. */
15537 /* Unfortunately, this will make everything apart from zero out-of-range.
15538 FIXME is this the intended semantics? There doesn't seem much point in
15539 accepting .I8 if so. */
15540 immediate
|= immediate
<< 8;
15546 if (immediate
== (immediate
& 0x000000ff))
15548 *immbits
= immediate
;
15551 else if (immediate
== (immediate
& 0x0000ff00))
15553 *immbits
= immediate
>> 8;
15556 else if (immediate
== (immediate
& 0x00ff0000))
15558 *immbits
= immediate
>> 16;
15561 else if (immediate
== (immediate
& 0xff000000))
15563 *immbits
= immediate
>> 24;
15566 if ((immediate
& 0xffff) != (immediate
>> 16))
15567 goto bad_immediate
;
15568 immediate
&= 0xffff;
15571 if (immediate
== (immediate
& 0x000000ff))
15573 *immbits
= immediate
;
15576 else if (immediate
== (immediate
& 0x0000ff00))
15578 *immbits
= immediate
>> 8;
15583 first_error (_("immediate value out of range"));
15588 do_neon_logic (void)
15590 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15592 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15593 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15594 /* U bit and size field were set as part of the bitmask. */
15595 NEON_ENCODE (INTEGER
, inst
);
15596 neon_three_same (neon_quad (rs
), 0, -1);
15600 const int three_ops_form
= (inst
.operands
[2].present
15601 && !inst
.operands
[2].isreg
);
15602 const int immoperand
= (three_ops_form
? 2 : 1);
15603 enum neon_shape rs
= (three_ops_form
15604 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15605 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15606 struct neon_type_el et
= neon_check_type (2, rs
,
15607 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15608 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15612 if (et
.type
== NT_invtype
)
15615 if (three_ops_form
)
15616 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15617 _("first and second operands shall be the same register"));
15619 NEON_ENCODE (IMMED
, inst
);
15621 immbits
= inst
.operands
[immoperand
].imm
;
15624 /* .i64 is a pseudo-op, so the immediate must be a repeating
15626 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15627 inst
.operands
[immoperand
].reg
: 0))
15629 /* Set immbits to an invalid constant. */
15630 immbits
= 0xdeadbeef;
15637 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15641 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15645 /* Pseudo-instruction for VBIC. */
15646 neon_invert_size (&immbits
, 0, et
.size
);
15647 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15651 /* Pseudo-instruction for VORR. */
15652 neon_invert_size (&immbits
, 0, et
.size
);
15653 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15663 inst
.instruction
|= neon_quad (rs
) << 6;
15664 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15665 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15666 inst
.instruction
|= cmode
<< 8;
15667 neon_write_immbits (immbits
);
15669 neon_dp_fixup (&inst
);
15674 do_neon_bitfield (void)
15676 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15677 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15678 neon_three_same (neon_quad (rs
), 0, -1);
15682 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15685 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15686 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15688 if (et
.type
== NT_float
)
15690 NEON_ENCODE (FLOAT
, inst
);
15692 mve_encode_qqr (et
.size
, 1);
15694 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15698 NEON_ENCODE (INTEGER
, inst
);
15700 mve_encode_qqr (et
.size
, 0);
15702 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15708 do_neon_dyadic_if_su_d (void)
15710 /* This version only allow D registers, but that constraint is enforced during
15711 operand parsing so we don't need to do anything extra here. */
15712 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15716 do_neon_dyadic_if_i_d (void)
15718 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15719 affected if we specify unsigned args. */
15720 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Bitmask of checks performed by vfp_or_neon_is_neon.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,     /* Verify/fix the condition-code field.  */
  NEON_CHECK_ARCH = 2,   /* Require base Neon support.  */
  NEON_CHECK_ARCH8 = 4   /* Require ARMv8 Neon support.  */
};
15730 /* Call this function if an instruction which may have belonged to the VFP or
15731 Neon instruction sets, but turned out to be a Neon instruction (due to the
15732 operand types involved, etc.). We have to check and/or fix-up a couple of
15735 - Make sure the user hasn't attempted to make a Neon instruction
15737 - Alter the value in the condition code field if necessary.
15738 - Make sure that the arch supports Neon instructions.
15740 Which of these operations take place depends on bits from enum
15741 vfp_or_neon_is_neon_bits.
15743 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15744 current instruction's condition is COND_ALWAYS, the condition field is
15745 changed to inst.uncond_value. This is necessary because instructions shared
15746 between VFP and Neon may be conditional for the VFP variants only, and the
15747 unconditional Neon version must have, e.g., 0xF in the condition field. */
15750 vfp_or_neon_is_neon (unsigned check
)
15752 /* Conditions are always legal in Thumb mode (IT blocks). */
15753 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15755 if (inst
.cond
!= COND_ALWAYS
)
15757 first_error (_(BAD_COND
));
15760 if (inst
.uncond_value
!= -1)
15761 inst
.instruction
|= inst
.uncond_value
<< 28;
15765 if (((check
& NEON_CHECK_ARCH
) && !mark_feature_used (&fpu_neon_ext_v1
))
15766 || ((check
& NEON_CHECK_ARCH8
)
15767 && !mark_feature_used (&fpu_neon_ext_armv8
)))
15769 first_error (_(BAD_FPU
));
15777 check_simd_pred_availability (int fp
, unsigned check
)
15779 if (inst
.cond
> COND_ALWAYS
)
15781 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15783 inst
.error
= BAD_FPU
;
15786 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15788 else if (inst
.cond
< COND_ALWAYS
)
15790 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15791 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15792 else if (vfp_or_neon_is_neon (check
) == FAIL
)
15797 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fp
? mve_fp_ext
: mve_ext
)
15798 && vfp_or_neon_is_neon (check
) == FAIL
)
15801 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15802 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15808 do_mve_vstr_vldr_QI (int size
, int elsize
, int load
)
15810 constraint (size
< 32, BAD_ADDR_MODE
);
15811 constraint (size
!= elsize
, BAD_EL_TYPE
);
15812 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
15813 constraint (!inst
.operands
[1].preind
, BAD_ADDR_MODE
);
15814 constraint (load
&& inst
.operands
[0].reg
== inst
.operands
[1].reg
,
15815 _("destination register and offset register may not be the"
15818 int imm
= inst
.relocs
[0].exp
.X_add_number
;
15825 constraint ((imm
% (size
/ 8) != 0)
15826 || imm
> (0x7f << neon_logbits (size
)),
15827 (size
== 32) ? _("immediate must be a multiple of 4 in the"
15828 " range of +/-[0,508]")
15829 : _("immediate must be a multiple of 8 in the"
15830 " range of +/-[0,1016]"));
15831 inst
.instruction
|= 0x11 << 24;
15832 inst
.instruction
|= add
<< 23;
15833 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15834 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
15835 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15836 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15837 inst
.instruction
|= 1 << 12;
15838 inst
.instruction
|= (size
== 64) << 8;
15839 inst
.instruction
&= 0xffffff00;
15840 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15841 inst
.instruction
|= imm
>> neon_logbits (size
);
15845 do_mve_vstr_vldr_RQ (int size
, int elsize
, int load
)
15847 unsigned os
= inst
.operands
[1].imm
>> 5;
15848 constraint (os
!= 0 && size
== 8,
15849 _("can not shift offsets when accessing less than half-word"));
15850 constraint (os
&& os
!= neon_logbits (size
),
15851 _("shift immediate must be 1, 2 or 3 for half-word, word"
15852 " or double-word accesses respectively"));
15853 if (inst
.operands
[1].reg
== REG_PC
)
15854 as_tsktsk (MVE_BAD_PC
);
15859 constraint (elsize
>= 64, BAD_EL_TYPE
);
15862 constraint (elsize
< 16 || elsize
>= 64, BAD_EL_TYPE
);
15866 constraint (elsize
!= size
, BAD_EL_TYPE
);
15871 constraint (inst
.operands
[1].writeback
|| !inst
.operands
[1].preind
,
15875 constraint (inst
.operands
[0].reg
== (inst
.operands
[1].imm
& 0x1f),
15876 _("destination register and offset register may not be"
15878 constraint (size
== elsize
&& inst
.vectype
.el
[0].type
!= NT_unsigned
,
15880 constraint (inst
.vectype
.el
[0].type
!= NT_unsigned
15881 && inst
.vectype
.el
[0].type
!= NT_signed
, BAD_EL_TYPE
);
15882 inst
.instruction
|= (inst
.vectype
.el
[0].type
== NT_unsigned
) << 28;
15886 constraint (inst
.vectype
.el
[0].type
!= NT_untyped
, BAD_EL_TYPE
);
15889 inst
.instruction
|= 1 << 23;
15890 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15891 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15892 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15893 inst
.instruction
|= neon_logbits (elsize
) << 7;
15894 inst
.instruction
|= HI1 (inst
.operands
[1].imm
) << 5;
15895 inst
.instruction
|= LOW4 (inst
.operands
[1].imm
);
15896 inst
.instruction
|= !!os
;
15900 do_mve_vstr_vldr_RI (int size
, int elsize
, int load
)
15902 enum neon_el_type type
= inst
.vectype
.el
[0].type
;
15904 constraint (size
>= 64, BAD_ADDR_MODE
);
15908 constraint (elsize
< 16 || elsize
>= 64, BAD_EL_TYPE
);
15911 constraint (elsize
!= size
, BAD_EL_TYPE
);
15918 constraint (elsize
!= size
&& type
!= NT_unsigned
15919 && type
!= NT_signed
, BAD_EL_TYPE
);
15923 constraint (elsize
!= size
&& type
!= NT_untyped
, BAD_EL_TYPE
);
15926 int imm
= inst
.relocs
[0].exp
.X_add_number
;
15934 if ((imm
% (size
/ 8) != 0) || imm
> (0x7f << neon_logbits (size
)))
15939 constraint (1, _("immediate must be in the range of +/-[0,127]"));
15942 constraint (1, _("immediate must be a multiple of 2 in the"
15943 " range of +/-[0,254]"));
15946 constraint (1, _("immediate must be a multiple of 4 in the"
15947 " range of +/-[0,508]"));
15952 if (size
!= elsize
)
15954 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
15955 constraint (inst
.operands
[0].reg
> 14,
15956 _("MVE vector register in the range [Q0..Q7] expected"));
15957 inst
.instruction
|= (load
&& type
== NT_unsigned
) << 28;
15958 inst
.instruction
|= (size
== 16) << 19;
15959 inst
.instruction
|= neon_logbits (elsize
) << 7;
15963 if (inst
.operands
[1].reg
== REG_PC
)
15964 as_tsktsk (MVE_BAD_PC
);
15965 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
15966 as_tsktsk (MVE_BAD_SP
);
15967 inst
.instruction
|= 1 << 12;
15968 inst
.instruction
|= neon_logbits (size
) << 7;
15970 inst
.instruction
|= inst
.operands
[1].preind
<< 24;
15971 inst
.instruction
|= add
<< 23;
15972 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15973 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
15974 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15975 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15976 inst
.instruction
&= 0xffffff80;
15977 inst
.instruction
|= imm
>> neon_logbits (size
);
15982 do_mve_vstr_vldr (void)
15987 if (inst
.cond
> COND_ALWAYS
)
15988 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15990 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15992 switch (inst
.instruction
)
15999 /* fall through. */
16005 /* fall through. */
16011 /* fall through. */
16017 /* fall through. */
16022 unsigned elsize
= inst
.vectype
.el
[0].size
;
16024 if (inst
.operands
[1].isquad
)
16026 /* We are dealing with [Q, imm]{!} cases. */
16027 do_mve_vstr_vldr_QI (size
, elsize
, load
);
16031 if (inst
.operands
[1].immisreg
== 2)
16033 /* We are dealing with [R, Q, {UXTW #os}] cases. */
16034 do_mve_vstr_vldr_RQ (size
, elsize
, load
);
16036 else if (!inst
.operands
[1].immisreg
)
16038 /* We are dealing with [R, Imm]{!}/[R], Imm cases. */
16039 do_mve_vstr_vldr_RI (size
, elsize
, load
);
16042 constraint (1, BAD_ADDR_MODE
);
16049 do_mve_vst_vld (void)
16051 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16054 constraint (!inst
.operands
[1].preind
|| inst
.relocs
[0].exp
.X_add_symbol
!= 0
16055 || inst
.relocs
[0].exp
.X_add_number
!= 0
16056 || inst
.operands
[1].immisreg
!= 0,
16058 constraint (inst
.vectype
.el
[0].size
> 32, BAD_EL_TYPE
);
16059 if (inst
.operands
[1].reg
== REG_PC
)
16060 as_tsktsk (MVE_BAD_PC
);
16061 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
16062 as_tsktsk (MVE_BAD_SP
);
16065 /* These instructions are one of the "exceptions" mentioned in
16066 handle_pred_state. They are MVE instructions that are not VPT compatible
16067 and do not accept a VPT code, thus appending such a code is a syntax
16069 if (inst
.cond
> COND_ALWAYS
)
16070 first_error (BAD_SYNTAX
);
16071 /* If we append a scalar condition code we can set this to
16072 MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error. */
16073 else if (inst
.cond
< COND_ALWAYS
)
16074 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16076 inst
.pred_insn_type
= MVE_UNPREDICABLE_INSN
;
16078 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16079 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
16080 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16081 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16082 inst
.instruction
|= neon_logbits (inst
.vectype
.el
[0].size
) << 7;
16087 do_neon_dyadic_if_su (void)
16089 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
16090 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16093 if (check_simd_pred_availability (et
.type
== NT_float
,
16094 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16097 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
16101 do_neon_addsub_if_i (void)
16103 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
16104 && try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
16107 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
16108 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
,
16109 N_EQK
, N_IF_32
| N_I64
| N_KEY
);
16111 constraint (rs
== NS_QQR
&& et
.size
== 64, BAD_FPU
);
16112 /* If we are parsing Q registers and the element types match MVE, which NEON
16113 also supports, then we must check whether this is an instruction that can
16114 be used by both MVE/NEON. This distinction can be made based on whether
16115 they are predicated or not. */
16116 if ((rs
== NS_QQQ
|| rs
== NS_QQR
) && et
.size
!= 64)
16118 if (check_simd_pred_availability (et
.type
== NT_float
,
16119 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16124 /* If they are either in a D register or are using an unsupported. */
16126 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16130 /* The "untyped" case can't happen. Do this to stop the "U" bit being
16131 affected if we specify unsigned args. */
16132 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
16135 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
16137 V<op> A,B (A is operand 0, B is operand 2)
16142 so handle that case specially. */
16145 neon_exchange_operands (void)
16147 if (inst
.operands
[1].present
)
16149 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
16151 /* Swap operands[1] and operands[2]. */
16152 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
16153 inst
.operands
[1] = inst
.operands
[2];
16154 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
16159 inst
.operands
[1] = inst
.operands
[2];
16160 inst
.operands
[2] = inst
.operands
[0];
16165 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
16167 if (inst
.operands
[2].isreg
)
16170 neon_exchange_operands ();
16171 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
16175 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16176 struct neon_type_el et
= neon_check_type (2, rs
,
16177 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
16179 NEON_ENCODE (IMMED
, inst
);
16180 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16181 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16182 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16183 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16184 inst
.instruction
|= neon_quad (rs
) << 6;
16185 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16186 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16188 neon_dp_fixup (&inst
);
16195 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
16199 do_neon_cmp_inv (void)
16201 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
16207 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding.  There is also register and index range
   check based on ELSIZE.  */
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    bad_scalar:
    default:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
16249 /* Encode multiply / multiply-accumulate scalar instructions. */
16252 neon_mul_mac (struct neon_type_el et
, int ubit
)
16256 /* Give a more helpful error message if we have an invalid type. */
16257 if (et
.type
== NT_invtype
)
16260 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
16261 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16262 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16263 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16264 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16265 inst
.instruction
|= LOW4 (scalar
);
16266 inst
.instruction
|= HI1 (scalar
) << 5;
16267 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16268 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16269 inst
.instruction
|= (ubit
!= 0) << 24;
16271 neon_dp_fixup (&inst
);
16275 do_neon_mac_maybe_scalar (void)
16277 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
16280 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16283 if (inst
.operands
[2].isscalar
)
16285 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16286 struct neon_type_el et
= neon_check_type (3, rs
,
16287 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
16288 NEON_ENCODE (SCALAR
, inst
);
16289 neon_mul_mac (et
, neon_quad (rs
));
16293 /* The "untyped" case can't happen. Do this to stop the "U" bit being
16294 affected if we specify unsigned args. */
16295 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
16300 do_neon_fmac (void)
16302 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
16305 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16308 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
16314 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16315 struct neon_type_el et
= neon_check_type (3, rs
,
16316 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16317 neon_three_same (neon_quad (rs
), 0, et
.size
);
16320 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
16321 same types as the MAC equivalents. The polynomial type for this instruction
16322 is encoded the same as the integer type. */
16327 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
16330 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16333 if (inst
.operands
[2].isscalar
)
16334 do_neon_mac_maybe_scalar ();
16336 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
16340 do_neon_qdmulh (void)
16342 if (inst
.operands
[2].isscalar
)
16344 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16345 struct neon_type_el et
= neon_check_type (3, rs
,
16346 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16347 NEON_ENCODE (SCALAR
, inst
);
16348 neon_mul_mac (et
, neon_quad (rs
));
16352 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16353 struct neon_type_el et
= neon_check_type (3, rs
,
16354 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16355 NEON_ENCODE (INTEGER
, inst
);
16356 /* The U bit (rounding) comes from bit mask. */
16357 neon_three_same (neon_quad (rs
), 0, et
.size
);
16362 do_mve_vmull (void)
16365 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_DDS
,
16366 NS_QQS
, NS_QQQ
, NS_QQR
, NS_NULL
);
16367 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
16368 && inst
.cond
== COND_ALWAYS
16369 && ((unsigned)inst
.instruction
) == M_MNEM_vmullt
)
16374 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16375 N_SUF_32
| N_F64
| N_P8
16376 | N_P16
| N_I_MVE
| N_KEY
);
16377 if (((et
.type
== NT_poly
) && et
.size
== 8
16378 && ARM_CPU_IS_ANY (cpu_variant
))
16379 || (et
.type
== NT_integer
) || (et
.type
== NT_float
))
16386 constraint (rs
!= NS_QQQ
, BAD_FPU
);
16387 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16388 N_SU_32
| N_P8
| N_P16
| N_KEY
);
16390 /* We are dealing with MVE's vmullt. */
16392 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
16393 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
16394 as_tsktsk (BAD_MVE_SRCDEST
);
16396 if (inst
.cond
> COND_ALWAYS
)
16397 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16399 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16401 if (et
.type
== NT_poly
)
16402 mve_encode_qqq (neon_logbits (et
.size
), 64);
16404 mve_encode_qqq (et
.type
== NT_unsigned
, et
.size
);
16409 inst
.instruction
= N_MNEM_vmul
;
16412 inst
.pred_insn_type
= INSIDE_IT_INSN
;
16417 do_mve_vabav (void)
16419 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16424 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16427 struct neon_type_el et
= neon_check_type (2, NS_NULL
, N_EQK
, N_KEY
| N_S8
16428 | N_S16
| N_S32
| N_U8
| N_U16
16431 if (inst
.cond
> COND_ALWAYS
)
16432 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16434 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16436 mve_encode_rqq (et
.type
== NT_unsigned
, et
.size
);
16440 do_mve_vmladav (void)
16442 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16443 struct neon_type_el et
= neon_check_type (3, rs
,
16444 N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
16446 if (et
.type
== NT_unsigned
16447 && (inst
.instruction
== M_MNEM_vmladavx
16448 || inst
.instruction
== M_MNEM_vmladavax
16449 || inst
.instruction
== M_MNEM_vmlsdav
16450 || inst
.instruction
== M_MNEM_vmlsdava
16451 || inst
.instruction
== M_MNEM_vmlsdavx
16452 || inst
.instruction
== M_MNEM_vmlsdavax
))
16453 first_error (BAD_SIMD_TYPE
);
16455 constraint (inst
.operands
[2].reg
> 14,
16456 _("MVE vector register in the range [Q0..Q7] expected"));
16458 if (inst
.cond
> COND_ALWAYS
)
16459 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16461 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16463 if (inst
.instruction
== M_MNEM_vmlsdav
16464 || inst
.instruction
== M_MNEM_vmlsdava
16465 || inst
.instruction
== M_MNEM_vmlsdavx
16466 || inst
.instruction
== M_MNEM_vmlsdavax
)
16467 inst
.instruction
|= (et
.size
== 8) << 28;
16469 inst
.instruction
|= (et
.size
== 8) << 8;
16471 mve_encode_rqq (et
.type
== NT_unsigned
, 64);
16472 inst
.instruction
|= (et
.size
== 32) << 16;
16476 do_neon_qrdmlah (void)
16478 /* Check we're on the correct architecture. */
16479 if (!mark_feature_used (&fpu_neon_ext_armv8
))
16481 _("instruction form not available on this architecture.");
16482 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
16484 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
16485 record_feature_use (&fpu_neon_ext_v8_1
);
16488 if (inst
.operands
[2].isscalar
)
16490 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16491 struct neon_type_el et
= neon_check_type (3, rs
,
16492 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16493 NEON_ENCODE (SCALAR
, inst
);
16494 neon_mul_mac (et
, neon_quad (rs
));
16498 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16499 struct neon_type_el et
= neon_check_type (3, rs
,
16500 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16501 NEON_ENCODE (INTEGER
, inst
);
16502 /* The U bit (rounding) comes from bit mask. */
16503 neon_three_same (neon_quad (rs
), 0, et
.size
);
16508 do_neon_fcmp_absolute (void)
16510 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16511 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16512 N_F_16_32
| N_KEY
);
16513 /* Size field comes from bit mask. */
16514 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* VACLE/VACLT: absolute compare with operands exchanged.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16525 do_neon_step (void)
16527 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16528 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16529 N_F_16_32
| N_KEY
);
16530 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
16534 do_neon_abs_neg (void)
16536 enum neon_shape rs
;
16537 struct neon_type_el et
;
16539 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
16542 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16543 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
16545 if (check_simd_pred_availability (et
.type
== NT_float
,
16546 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16549 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16550 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16551 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16552 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16553 inst
.instruction
|= neon_quad (rs
) << 6;
16554 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16555 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16557 neon_dp_fixup (&inst
);
16563 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16564 struct neon_type_el et
= neon_check_type (2, rs
,
16565 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16566 int imm
= inst
.operands
[2].imm
;
16567 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16568 _("immediate out of range for insert"));
16569 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16575 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16576 struct neon_type_el et
= neon_check_type (2, rs
,
16577 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16578 int imm
= inst
.operands
[2].imm
;
16579 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16580 _("immediate out of range for insert"));
16581 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
16585 do_neon_qshlu_imm (void)
16587 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16588 struct neon_type_el et
= neon_check_type (2, rs
,
16589 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
16590 int imm
= inst
.operands
[2].imm
;
16591 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16592 _("immediate out of range for shift"));
16593 /* Only encodes the 'U present' variant of the instruction.
16594 In this case, signed types have OP (bit 8) set to 0.
16595 Unsigned types have OP set to 1. */
16596 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
16597 /* The rest of the bits are the same as other immediate shifts. */
16598 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16602 do_neon_qmovn (void)
16604 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16605 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16606 /* Saturating move where operands can be signed or unsigned, and the
16607 destination has the same signedness. */
16608 NEON_ENCODE (INTEGER
, inst
);
16609 if (et
.type
== NT_unsigned
)
16610 inst
.instruction
|= 0xc0;
16612 inst
.instruction
|= 0x80;
16613 neon_two_same (0, 1, et
.size
/ 2);
16617 do_neon_qmovun (void)
16619 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16620 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16621 /* Saturating move with unsigned results. Operands must be signed. */
16622 NEON_ENCODE (INTEGER
, inst
);
16623 neon_two_same (0, 1, et
.size
/ 2);
16627 do_neon_rshift_sat_narrow (void)
16629 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16630 or unsigned. If operands are unsigned, results must also be unsigned. */
16631 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16632 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16633 int imm
= inst
.operands
[2].imm
;
16634 /* This gets the bounds check, size encoding and immediate bits calculation
16638 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
16639 VQMOVN.I<size> <Dd>, <Qm>. */
16642 inst
.operands
[2].present
= 0;
16643 inst
.instruction
= N_MNEM_vqmovn
;
16648 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16649 _("immediate out of range"));
16650 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
16654 do_neon_rshift_sat_narrow_u (void)
16656 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16657 or unsigned. If operands are unsigned, results must also be unsigned. */
16658 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16659 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16660 int imm
= inst
.operands
[2].imm
;
16661 /* This gets the bounds check, size encoding and immediate bits calculation
16665 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
16666 VQMOVUN.I<size> <Dd>, <Qm>. */
16669 inst
.operands
[2].present
= 0;
16670 inst
.instruction
= N_MNEM_vqmovun
;
16675 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16676 _("immediate out of range"));
16677 /* FIXME: The manual is kind of unclear about what value U should have in
16678 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
16680 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
16684 do_neon_movn (void)
16686 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16687 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16688 NEON_ENCODE (INTEGER
, inst
);
16689 neon_two_same (0, 1, et
.size
/ 2);
16693 do_neon_rshift_narrow (void)
16695 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16696 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16697 int imm
= inst
.operands
[2].imm
;
16698 /* This gets the bounds check, size encoding and immediate bits calculation
16702 /* If immediate is zero then we are a pseudo-instruction for
16703 VMOVN.I<size> <Dd>, <Qm> */
16706 inst
.operands
[2].present
= 0;
16707 inst
.instruction
= N_MNEM_vmovn
;
16712 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16713 _("immediate out of range for narrowing operation"));
16714 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
16718 do_neon_shll (void)
16720 /* FIXME: Type checking when lengthening. */
16721 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
16722 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
16723 unsigned imm
= inst
.operands
[2].imm
;
16725 if (imm
== et
.size
)
16727 /* Maximum shift variant. */
16728 NEON_ENCODE (INTEGER
, inst
);
16729 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16730 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16731 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16732 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16733 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16735 neon_dp_fixup (&inst
);
16739 /* A more-specific type check for non-max versions. */
16740 et
= neon_check_type (2, NS_QDI
,
16741 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16742 NEON_ENCODE (IMMED
, inst
);
16743 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  */
#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
16799 static enum neon_cvt_flavour
16800 get_neon_cvt_flavour (enum neon_shape rs
)
16802 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16803 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16804 if (et.type != NT_invtype) \
16806 inst.error = NULL; \
16807 return (neon_cvt_flavour_##C); \
16810 struct neon_type_el et
;
16811 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16812 || rs
== NS_FF
) ? N_VFP
: 0;
16813 /* The instruction versions which take an immediate take one register
16814 argument, which is extended to the width of the full register. Thus the
16815 "source" and "destination" registers must have the same width. Hack that
16816 here by making the size equal to the key (wider, in this case) operand. */
16817 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16821 return neon_cvt_flavour_invalid
;
16836 /* Neon-syntax VFP conversions. */
16839 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16841 const char *opname
= 0;
16843 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16844 || rs
== NS_FHI
|| rs
== NS_HFI
)
16846 /* Conversions with immediate bitshift. */
16847 const char *enc
[] =
16849 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16855 if (flavour
< (int) ARRAY_SIZE (enc
))
16857 opname
= enc
[flavour
];
16858 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16859 _("operands 0 and 1 must be the same register"));
16860 inst
.operands
[1] = inst
.operands
[2];
16861 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16866 /* Conversions without bitshift. */
16867 const char *enc
[] =
16869 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16875 if (flavour
< (int) ARRAY_SIZE (enc
))
16876 opname
= enc
[flavour
];
16880 do_vfp_nsyn_opcode (opname
);
16882 /* ARMv8.2 fp16 VCVT instruction. */
16883 if (flavour
== neon_cvt_flavour_s32_f16
16884 || flavour
== neon_cvt_flavour_u32_f16
16885 || flavour
== neon_cvt_flavour_f16_u32
16886 || flavour
== neon_cvt_flavour_f16_s32
)
16887 do_scalar_fp16_v82_encode ();
16891 do_vfp_nsyn_cvtz (void)
16893 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16894 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16895 const char *enc
[] =
16897 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16903 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16904 do_vfp_nsyn_opcode (enc
[flavour
]);
16908 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16909 enum neon_cvt_mode mode
)
16914 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16915 D register operands. */
16916 if (flavour
== neon_cvt_flavour_s32_f64
16917 || flavour
== neon_cvt_flavour_u32_f64
)
16918 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16921 if (flavour
== neon_cvt_flavour_s32_f16
16922 || flavour
== neon_cvt_flavour_u32_f16
)
16923 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16926 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16930 case neon_cvt_flavour_s32_f64
:
16934 case neon_cvt_flavour_s32_f32
:
16938 case neon_cvt_flavour_s32_f16
:
16942 case neon_cvt_flavour_u32_f64
:
16946 case neon_cvt_flavour_u32_f32
:
16950 case neon_cvt_flavour_u32_f16
:
16955 first_error (_("invalid instruction shape"));
16961 case neon_cvt_mode_a
: rm
= 0; break;
16962 case neon_cvt_mode_n
: rm
= 1; break;
16963 case neon_cvt_mode_p
: rm
= 2; break;
16964 case neon_cvt_mode_m
: rm
= 3; break;
16965 default: first_error (_("invalid rounding mode")); return;
16968 NEON_ENCODE (FPV8
, inst
);
16969 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16970 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16971 inst
.instruction
|= sz
<< 8;
16973 /* ARMv8.2 fp16 VCVT instruction. */
16974 if (flavour
== neon_cvt_flavour_s32_f16
16975 ||flavour
== neon_cvt_flavour_u32_f16
)
16976 do_scalar_fp16_v82_encode ();
16977 inst
.instruction
|= op
<< 7;
16978 inst
.instruction
|= rm
<< 16;
16979 inst
.instruction
|= 0xf0000000;
16980 inst
.is_neon
= TRUE
;
16984 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16986 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16987 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16988 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16990 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16992 if (flavour
== neon_cvt_flavour_invalid
)
16995 /* PR11109: Handle round-to-zero for VCVT conversions. */
16996 if (mode
== neon_cvt_mode_z
16997 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16998 && (flavour
== neon_cvt_flavour_s16_f16
16999 || flavour
== neon_cvt_flavour_u16_f16
17000 || flavour
== neon_cvt_flavour_s32_f32
17001 || flavour
== neon_cvt_flavour_u32_f32
17002 || flavour
== neon_cvt_flavour_s32_f64
17003 || flavour
== neon_cvt_flavour_u32_f64
)
17004 && (rs
== NS_FD
|| rs
== NS_FF
))
17006 do_vfp_nsyn_cvtz ();
17010 /* ARMv8.2 fp16 VCVT conversions. */
17011 if (mode
== neon_cvt_mode_z
17012 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
17013 && (flavour
== neon_cvt_flavour_s32_f16
17014 || flavour
== neon_cvt_flavour_u32_f16
)
17017 do_vfp_nsyn_cvtz ();
17018 do_scalar_fp16_v82_encode ();
17022 /* VFP rather than Neon conversions. */
17023 if (flavour
>= neon_cvt_flavour_first_fp
)
17025 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
17026 do_vfp_nsyn_cvt (rs
, flavour
);
17028 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
17039 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
17040 0x0000100, 0x1000100, 0x0, 0x1000000};
17042 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17045 /* Fixed-point conversion with #0 immediate is encoded as an
17046 integer conversion. */
17047 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
17049 NEON_ENCODE (IMMED
, inst
);
17050 if (flavour
!= neon_cvt_flavour_invalid
)
17051 inst
.instruction
|= enctab
[flavour
];
17052 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17053 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17054 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17055 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17056 inst
.instruction
|= neon_quad (rs
) << 6;
17057 inst
.instruction
|= 1 << 21;
17058 if (flavour
< neon_cvt_flavour_s16_f16
)
17060 inst
.instruction
|= 1 << 21;
17061 immbits
= 32 - inst
.operands
[2].imm
;
17062 inst
.instruction
|= immbits
<< 16;
17066 inst
.instruction
|= 3 << 20;
17067 immbits
= 16 - inst
.operands
[2].imm
;
17068 inst
.instruction
|= immbits
<< 16;
17069 inst
.instruction
&= ~(1 << 9);
17072 neon_dp_fixup (&inst
);
17078 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
17080 NEON_ENCODE (FLOAT
, inst
);
17081 set_pred_insn_type (OUTSIDE_PRED_INSN
);
17083 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17086 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17087 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17088 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17089 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17090 inst
.instruction
|= neon_quad (rs
) << 6;
17091 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
17092 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
17093 inst
.instruction
|= mode
<< 8;
17094 if (flavour
== neon_cvt_flavour_u16_f16
17095 || flavour
== neon_cvt_flavour_s16_f16
)
17096 /* Mask off the original size bits and reencode them. */
17097 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
17100 inst
.instruction
|= 0xfc000000;
17102 inst
.instruction
|= 0xf0000000;
17108 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
17109 0x100, 0x180, 0x0, 0x080};
17111 NEON_ENCODE (INTEGER
, inst
);
17113 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17116 if (flavour
!= neon_cvt_flavour_invalid
)
17117 inst
.instruction
|= enctab
[flavour
];
17119 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17120 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17121 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17122 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17123 inst
.instruction
|= neon_quad (rs
) << 6;
17124 if (flavour
>= neon_cvt_flavour_s16_f16
17125 && flavour
<= neon_cvt_flavour_f16_u16
)
17126 /* Half precision. */
17127 inst
.instruction
|= 1 << 18;
17129 inst
.instruction
|= 2 << 18;
17131 neon_dp_fixup (&inst
);
17136 /* Half-precision conversions for Advanced SIMD -- neon. */
17139 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17143 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
17145 as_bad (_("operand size must match register width"));
17150 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
17152 as_bad (_("operand size must match register width"));
17157 inst
.instruction
= 0x3b60600;
17159 inst
.instruction
= 0x3b60700;
17161 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17162 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17163 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17164 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17165 neon_dp_fixup (&inst
);
17169 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
17170 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
17171 do_vfp_nsyn_cvt (rs
, flavour
);
17173 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
17178 do_neon_cvtr (void)
17180 do_neon_cvt_1 (neon_cvt_mode_x
);
17186 do_neon_cvt_1 (neon_cvt_mode_z
);
17190 do_neon_cvta (void)
17192 do_neon_cvt_1 (neon_cvt_mode_a
);
17196 do_neon_cvtn (void)
17198 do_neon_cvt_1 (neon_cvt_mode_n
);
17202 do_neon_cvtp (void)
17204 do_neon_cvt_1 (neon_cvt_mode_p
);
17208 do_neon_cvtm (void)
17210 do_neon_cvt_1 (neon_cvt_mode_m
);
17214 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
17217 mark_feature_used (&fpu_vfp_ext_armv8
);
17219 encode_arm_vfp_reg (inst
.operands
[0].reg
,
17220 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
17221 encode_arm_vfp_reg (inst
.operands
[1].reg
,
17222 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
17223 inst
.instruction
|= to
? 0x10000 : 0;
17224 inst
.instruction
|= t
? 0x80 : 0;
17225 inst
.instruction
|= is_double
? 0x100 : 0;
17226 do_vfp_cond_or_thumb ();
17230 do_neon_cvttb_1 (bfd_boolean t
)
17232 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
17233 NS_DF
, NS_DH
, NS_NULL
);
17237 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
17240 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
17242 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
17245 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
17247 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
17249 /* The VCVTB and VCVTT instructions with D-register operands
17250 don't work for SP only targets. */
17251 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17255 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
17257 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
17259 /* The VCVTB and VCVTT instructions with D-register operands
17260 don't work for SP only targets. */
17261 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17265 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
17272 do_neon_cvtb (void)
17274 do_neon_cvttb_1 (FALSE
);
17279 do_neon_cvtt (void)
17281 do_neon_cvttb_1 (TRUE
);
17285 neon_move_immediate (void)
17287 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
17288 struct neon_type_el et
= neon_check_type (2, rs
,
17289 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
17290 unsigned immlo
, immhi
= 0, immbits
;
17291 int op
, cmode
, float_p
;
17293 constraint (et
.type
== NT_invtype
,
17294 _("operand size must be specified for immediate VMOV"));
17296 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
17297 op
= (inst
.instruction
& (1 << 5)) != 0;
17299 immlo
= inst
.operands
[1].imm
;
17300 if (inst
.operands
[1].regisimm
)
17301 immhi
= inst
.operands
[1].reg
;
17303 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
17304 _("immediate has bits set outside the operand size"));
17306 float_p
= inst
.operands
[1].immisfloat
;
17308 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
17309 et
.size
, et
.type
)) == FAIL
)
17311 /* Invert relevant bits only. */
17312 neon_invert_size (&immlo
, &immhi
, et
.size
);
17313 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
17314 with one or the other; those cases are caught by
17315 neon_cmode_for_move_imm. */
17317 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
17318 &op
, et
.size
, et
.type
)) == FAIL
)
17320 first_error (_("immediate out of range"));
17325 inst
.instruction
&= ~(1 << 5);
17326 inst
.instruction
|= op
<< 5;
17328 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17329 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17330 inst
.instruction
|= neon_quad (rs
) << 6;
17331 inst
.instruction
|= cmode
<< 8;
17333 neon_write_immbits (immbits
);
17339 if (inst
.operands
[1].isreg
)
17341 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17343 NEON_ENCODE (INTEGER
, inst
);
17344 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17345 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17346 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17347 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17348 inst
.instruction
|= neon_quad (rs
) << 6;
17352 NEON_ENCODE (IMMED
, inst
);
17353 neon_move_immediate ();
17356 neon_dp_fixup (&inst
);
17359 /* Encode instructions of form:
17361 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
17362 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
17365 neon_mixed_length (struct neon_type_el et
, unsigned size
)
17367 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17368 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17369 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17370 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17371 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17372 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17373 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
17374 inst
.instruction
|= neon_logbits (size
) << 20;
17376 neon_dp_fixup (&inst
);
17380 do_neon_dyadic_long (void)
17382 enum neon_shape rs
= neon_select_shape (NS_QDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17385 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH
| NEON_CHECK_CC
) == FAIL
)
17388 NEON_ENCODE (INTEGER
, inst
);
17389 /* FIXME: Type checking for lengthening op. */
17390 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17391 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17392 neon_mixed_length (et
, et
.size
);
17394 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
17395 && (inst
.cond
== 0xf || inst
.cond
== 0x10))
17397 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
17398 in an IT block with le/lt conditions. */
17400 if (inst
.cond
== 0xf)
17402 else if (inst
.cond
== 0x10)
17405 inst
.pred_insn_type
= INSIDE_IT_INSN
;
17407 if (inst
.instruction
== N_MNEM_vaddl
)
17409 inst
.instruction
= N_MNEM_vadd
;
17410 do_neon_addsub_if_i ();
17412 else if (inst
.instruction
== N_MNEM_vsubl
)
17414 inst
.instruction
= N_MNEM_vsub
;
17415 do_neon_addsub_if_i ();
17417 else if (inst
.instruction
== N_MNEM_vabdl
)
17419 inst
.instruction
= N_MNEM_vabd
;
17420 do_neon_dyadic_if_su ();
17424 first_error (BAD_FPU
);
17428 do_neon_abal (void)
17430 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17431 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17432 neon_mixed_length (et
, et
.size
);
17436 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
17438 if (inst
.operands
[2].isscalar
)
17440 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
17441 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
17442 NEON_ENCODE (SCALAR
, inst
);
17443 neon_mul_mac (et
, et
.type
== NT_unsigned
);
17447 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17448 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
17449 NEON_ENCODE (INTEGER
, inst
);
17450 neon_mixed_length (et
, et
.size
);
17455 do_neon_mac_maybe_scalar_long (void)
17457 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
17460 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
17461 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
/* Like neon_scalar_for_mul, generate the Rm encoding from GAS's internal
   SCALAR for vfmal/vfmsl.  QUAD_P is 1 for the Q-form encoding, 0 for
   the D form; the two forms pack the register/element bits differently
   and have different range limits.  Returns 0 after reporting an error
   when the scalar is out of range.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      if (regno > 7 || elno > 3)
	goto bad_scalar;

      return ((regno & 0x7)
	      | ((elno & 0x1) << 3)
	      | (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      if (regno > 15 || elno > 1)
	goto bad_scalar;

      return (((regno & 0x1) << 5)
	      | ((regno >> 1) & 0x7)
	      | ((elno & 0x1) << 3));
    }

 bad_scalar:
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17494 do_neon_fmac_maybe_scalar_long (int subtype
)
17496 enum neon_shape rs
;
17498 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
17499 field (bits[21:20]) has different meaning. For scalar index variant, it's
17500 used to differentiate add and subtract, otherwise it's with fixed value
17504 if (inst
.cond
!= COND_ALWAYS
)
17505 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
17506 "behaviour is UNPREDICTABLE"));
17508 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
17511 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17514 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
17515 be a scalar index register. */
17516 if (inst
.operands
[2].isscalar
)
17518 high8
= 0xfe000000;
17521 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
17525 high8
= 0xfc000000;
17528 inst
.instruction
|= (0x1 << 23);
17529 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
17532 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
17534 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
17535 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
17536 so we simply pass -1 as size. */
17537 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
17538 neon_three_same (quad_p
, 0, size
);
17540 /* Undo neon_dp_fixup. Redo the high eight bits. */
17541 inst
.instruction
&= 0x00ffffff;
17542 inst
.instruction
|= high8
;
17544 #define LOW1(R) ((R) & 0x1)
17545 #define HI4(R) (((R) >> 1) & 0xf)
17546 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
17547 whether the instruction is in Q form and whether Vm is a scalar indexed
17549 if (inst
.operands
[2].isscalar
)
17552 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
17553 inst
.instruction
&= 0xffffffd0;
17554 inst
.instruction
|= rm
;
17558 /* Redo Rn as well. */
17559 inst
.instruction
&= 0xfff0ff7f;
17560 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17561 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17566 /* Redo Rn and Rm. */
17567 inst
.instruction
&= 0xfff0ff50;
17568 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17569 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17570 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
17571 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
/* Encode VFMAL (FP16 fused multiply-add long, subtype 0).  */

static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}
/* Encode VFMSL (FP16 fused multiply-subtract long, subtype 1).  */

static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
17588 do_neon_dyadic_wide (void)
17590 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
17591 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17592 neon_mixed_length (et
, et
.size
);
17596 do_neon_dyadic_narrow (void)
17598 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17599 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
17600 /* Operand sign is unimportant, and the U bit is part of the opcode,
17601 so force the operand type to integer. */
17602 et
.type
= NT_integer
;
17603 neon_mixed_length (et
, et
.size
/ 2);
17607 do_neon_mul_sat_scalar_long (void)
17609 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
17613 do_neon_vmull (void)
17615 if (inst
.operands
[2].isscalar
)
17616 do_neon_mac_maybe_scalar_long ();
17619 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17620 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
17622 if (et
.type
== NT_poly
)
17623 NEON_ENCODE (POLY
, inst
);
17625 NEON_ENCODE (INTEGER
, inst
);
17627 /* For polynomial encoding the U bit must be zero, and the size must
17628 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
17629 obviously, as 0b10). */
17632 /* Check we're on the correct architecture. */
17633 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
17635 _("Instruction form not available on this architecture.");
17640 neon_mixed_length (et
, et
.size
);
17647 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17648 struct neon_type_el et
= neon_check_type (3, rs
,
17649 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
17650 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
17652 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
17653 _("shift out of range"));
17654 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17655 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17656 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17657 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17658 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17659 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17660 inst
.instruction
|= neon_quad (rs
) << 6;
17661 inst
.instruction
|= imm
<< 8;
17663 neon_dp_fixup (&inst
);
17669 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17670 struct neon_type_el et
= neon_check_type (2, rs
,
17671 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17672 unsigned op
= (inst
.instruction
>> 7) & 3;
17673 /* N (width of reversed regions) is encoded as part of the bitmask. We
17674 extract it here to check the elements to be reversed are smaller.
17675 Otherwise we'd get a reserved instruction. */
17676 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
17677 gas_assert (elsize
!= 0);
17678 constraint (et
.size
>= elsize
,
17679 _("elements must be smaller than reversal region"));
17680 neon_two_same (neon_quad (rs
), 1, et
.size
);
17686 if (inst
.operands
[1].isscalar
)
17688 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
17689 struct neon_type_el et
= neon_check_type (2, rs
,
17690 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17691 unsigned sizebits
= et
.size
>> 3;
17692 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17693 int logsize
= neon_logbits (et
.size
);
17694 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
17696 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
17699 NEON_ENCODE (SCALAR
, inst
);
17700 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17701 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17702 inst
.instruction
|= LOW4 (dm
);
17703 inst
.instruction
|= HI1 (dm
) << 5;
17704 inst
.instruction
|= neon_quad (rs
) << 6;
17705 inst
.instruction
|= x
<< 17;
17706 inst
.instruction
|= sizebits
<< 16;
17708 neon_dp_fixup (&inst
);
17712 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
17713 struct neon_type_el et
= neon_check_type (2, rs
,
17714 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17715 /* Duplicate ARM register to lanes of vector. */
17716 NEON_ENCODE (ARMREG
, inst
);
17719 case 8: inst
.instruction
|= 0x400000; break;
17720 case 16: inst
.instruction
|= 0x000020; break;
17721 case 32: inst
.instruction
|= 0x000000; break;
17724 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17725 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
17726 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
17727 inst
.instruction
|= neon_quad (rs
) << 21;
17728 /* The encoding for this instruction is identical for the ARM and Thumb
17729 variants, except for the condition field. */
17730 do_vfp_cond_or_thumb ();
17734 /* VMOV has particularly many variations. It can be one of:
17735 0. VMOV<c><q> <Qd>, <Qm>
17736 1. VMOV<c><q> <Dd>, <Dm>
17737 (Register operations, which are VORR with Rm = Rn.)
17738 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17739 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17741 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17742 (ARM register to scalar.)
17743 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17744 (Two ARM registers to vector.)
17745 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17746 (Scalar to ARM register.)
17747 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17748 (Vector to two ARM registers.)
17749 8. VMOV.F32 <Sd>, <Sm>
17750 9. VMOV.F64 <Dd>, <Dm>
17751 (VFP register moves.)
17752 10. VMOV.F32 <Sd>, #imm
17753 11. VMOV.F64 <Dd>, #imm
17754 (VFP float immediate load.)
17755 12. VMOV <Rd>, <Sm>
17756 (VFP single to ARM reg.)
17757 13. VMOV <Sd>, <Rm>
17758 (ARM reg to VFP single.)
17759 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17760 (Two ARM regs to two VFP singles.)
17761 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17762 (Two VFP singles to two ARM regs.)
17764 These cases can be disambiguated using neon_select_shape, except cases 1/9
17765 and 3/11 which depend on the operand type too.
17767 All the encoded bits are hardcoded by this function.
17769 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17770 Cases 5, 7 may be used with VFPv2 and above.
17772 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17773 can specify a type where it doesn't make sense to, and is ignored). */
17778 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
17779 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
17780 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
17781 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
17782 struct neon_type_el et
;
17783 const char *ldconst
= 0;
17787 case NS_DD
: /* case 1/9. */
17788 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17789 /* It is not an error here if no type is given. */
17791 if (et
.type
== NT_float
&& et
.size
== 64)
17793 do_vfp_nsyn_opcode ("fcpyd");
17796 /* fall through. */
17798 case NS_QQ
: /* case 0/1. */
17800 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17802 /* The architecture manual I have doesn't explicitly state which
17803 value the U bit should have for register->register moves, but
17804 the equivalent VORR instruction has U = 0, so do that. */
17805 inst
.instruction
= 0x0200110;
17806 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17807 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17808 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17809 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17810 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17811 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17812 inst
.instruction
|= neon_quad (rs
) << 6;
17814 neon_dp_fixup (&inst
);
17818 case NS_DI
: /* case 3/11. */
17819 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17821 if (et
.type
== NT_float
&& et
.size
== 64)
17823 /* case 11 (fconstd). */
17824 ldconst
= "fconstd";
17825 goto encode_fconstd
;
17827 /* fall through. */
17829 case NS_QI
: /* case 2/3. */
17830 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17832 inst
.instruction
= 0x0800010;
17833 neon_move_immediate ();
17834 neon_dp_fixup (&inst
);
17837 case NS_SR
: /* case 4. */
17839 unsigned bcdebits
= 0;
17841 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17842 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17844 /* .<size> is optional here, defaulting to .32. */
17845 if (inst
.vectype
.elems
== 0
17846 && inst
.operands
[0].vectype
.type
== NT_invtype
17847 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17849 inst
.vectype
.el
[0].type
= NT_untyped
;
17850 inst
.vectype
.el
[0].size
= 32;
17851 inst
.vectype
.elems
= 1;
17854 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17855 logsize
= neon_logbits (et
.size
);
17857 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17859 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17860 && et
.size
!= 32, _(BAD_FPU
));
17861 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17862 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17866 case 8: bcdebits
= 0x8; break;
17867 case 16: bcdebits
= 0x1; break;
17868 case 32: bcdebits
= 0x0; break;
17872 bcdebits
|= x
<< logsize
;
17874 inst
.instruction
= 0xe000b10;
17875 do_vfp_cond_or_thumb ();
17876 inst
.instruction
|= LOW4 (dn
) << 16;
17877 inst
.instruction
|= HI1 (dn
) << 7;
17878 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17879 inst
.instruction
|= (bcdebits
& 3) << 5;
17880 inst
.instruction
|= (bcdebits
>> 2) << 21;
17884 case NS_DRR
: /* case 5 (fmdrr). */
17885 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17888 inst
.instruction
= 0xc400b10;
17889 do_vfp_cond_or_thumb ();
17890 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17891 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17892 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17893 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17896 case NS_RS
: /* case 6. */
17899 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17900 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17901 unsigned abcdebits
= 0;
17903 /* .<dt> is optional here, defaulting to .32. */
17904 if (inst
.vectype
.elems
== 0
17905 && inst
.operands
[0].vectype
.type
== NT_invtype
17906 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17908 inst
.vectype
.el
[0].type
= NT_untyped
;
17909 inst
.vectype
.el
[0].size
= 32;
17910 inst
.vectype
.elems
= 1;
17913 et
= neon_check_type (2, NS_NULL
,
17914 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17915 logsize
= neon_logbits (et
.size
);
17917 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17919 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17920 && et
.size
!= 32, _(BAD_FPU
));
17921 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17922 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17926 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17927 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17928 case 32: abcdebits
= 0x00; break;
17932 abcdebits
|= x
<< logsize
;
17933 inst
.instruction
= 0xe100b10;
17934 do_vfp_cond_or_thumb ();
17935 inst
.instruction
|= LOW4 (dn
) << 16;
17936 inst
.instruction
|= HI1 (dn
) << 7;
17937 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17938 inst
.instruction
|= (abcdebits
& 3) << 5;
17939 inst
.instruction
|= (abcdebits
>> 2) << 21;
17943 case NS_RRD
: /* case 7 (fmrrd). */
17944 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17947 inst
.instruction
= 0xc500b10;
17948 do_vfp_cond_or_thumb ();
17949 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17950 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17951 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17952 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17955 case NS_FF
: /* case 8 (fcpys). */
17956 do_vfp_nsyn_opcode ("fcpys");
17960 case NS_FI
: /* case 10 (fconsts). */
17961 ldconst
= "fconsts";
17963 if (!inst
.operands
[1].immisfloat
)
17966 /* Immediate has to fit in 8 bits so float is enough. */
17967 float imm
= (float) inst
.operands
[1].imm
;
17968 memcpy (&new_imm
, &imm
, sizeof (float));
17969 /* But the assembly may have been written to provide an integer
17970 bit pattern that equates to a float, so check that the
17971 conversion has worked. */
17972 if (is_quarter_float (new_imm
))
17974 if (is_quarter_float (inst
.operands
[1].imm
))
17975 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17977 inst
.operands
[1].imm
= new_imm
;
17978 inst
.operands
[1].immisfloat
= 1;
17982 if (is_quarter_float (inst
.operands
[1].imm
))
17984 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17985 do_vfp_nsyn_opcode (ldconst
);
17987 /* ARMv8.2 fp16 vmov.f16 instruction. */
17989 do_scalar_fp16_v82_encode ();
17992 first_error (_("immediate out of range"));
17996 case NS_RF
: /* case 12 (fmrs). */
17997 do_vfp_nsyn_opcode ("fmrs");
17998 /* ARMv8.2 fp16 vmov.f16 instruction. */
18000 do_scalar_fp16_v82_encode ();
18004 case NS_FR
: /* case 13 (fmsr). */
18005 do_vfp_nsyn_opcode ("fmsr");
18006 /* ARMv8.2 fp16 vmov.f16 instruction. */
18008 do_scalar_fp16_v82_encode ();
18011 /* The encoders for the fmrrs and fmsrr instructions expect three operands
18012 (one of which is a list), but we have parsed four. Do some fiddling to
18013 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
18015 case NS_RRFF
: /* case 14 (fmrrs). */
18016 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
18017 _("VFP registers must be adjacent"));
18018 inst
.operands
[2].imm
= 2;
18019 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
18020 do_vfp_nsyn_opcode ("fmrrs");
18023 case NS_FFRR
: /* case 15 (fmsrr). */
18024 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
18025 _("VFP registers must be adjacent"));
18026 inst
.operands
[1] = inst
.operands
[2];
18027 inst
.operands
[2] = inst
.operands
[3];
18028 inst
.operands
[0].imm
= 2;
18029 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
18030 do_vfp_nsyn_opcode ("fmsrr");
18034 /* neon_select_shape has determined that the instruction
18035 shape is wrong and has already set the error message. */
18044 do_neon_rshift_round_imm (void)
18046 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
18047 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
18048 int imm
= inst
.operands
[2].imm
;
18050 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
18053 inst
.operands
[2].present
= 0;
18058 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
18059 _("immediate out of range for shift"));
18060 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
18065 do_neon_movhf (void)
18067 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
18068 constraint (rs
!= NS_HH
, _("invalid suffix"));
18070 if (inst
.cond
!= COND_ALWAYS
)
18074 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
18075 " the behaviour is UNPREDICTABLE"));
18079 inst
.error
= BAD_COND
;
18084 do_vfp_sp_monadic ();
18087 inst
.instruction
|= 0xf0000000;
18091 do_neon_movl (void)
18093 struct neon_type_el et
= neon_check_type (2, NS_QD
,
18094 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
18095 unsigned sizebits
= et
.size
>> 3;
18096 inst
.instruction
|= sizebits
<< 19;
18097 neon_two_same (0, et
.type
== NT_unsigned
, -1);
18103 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18104 struct neon_type_el et
= neon_check_type (2, rs
,
18105 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18106 NEON_ENCODE (INTEGER
, inst
);
18107 neon_two_same (neon_quad (rs
), 1, et
.size
);
18111 do_neon_zip_uzp (void)
18113 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18114 struct neon_type_el et
= neon_check_type (2, rs
,
18115 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18116 if (rs
== NS_DD
&& et
.size
== 32)
18118 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
18119 inst
.instruction
= N_MNEM_vtrn
;
18123 neon_two_same (neon_quad (rs
), 1, et
.size
);
18127 do_neon_sat_abs_neg (void)
18129 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18130 struct neon_type_el et
= neon_check_type (2, rs
,
18131 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18132 neon_two_same (neon_quad (rs
), 1, et
.size
);
18136 do_neon_pair_long (void)
18138 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18139 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
18140 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
18141 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
18142 neon_two_same (neon_quad (rs
), 1, et
.size
);
18146 do_neon_recip_est (void)
18148 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18149 struct neon_type_el et
= neon_check_type (2, rs
,
18150 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
18151 inst
.instruction
|= (et
.type
== NT_float
) << 8;
18152 neon_two_same (neon_quad (rs
), 1, et
.size
);
18158 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18159 struct neon_type_el et
= neon_check_type (2, rs
,
18160 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18161 neon_two_same (neon_quad (rs
), 1, et
.size
);
18167 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18168 struct neon_type_el et
= neon_check_type (2, rs
,
18169 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
18170 neon_two_same (neon_quad (rs
), 1, et
.size
);
18176 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18177 struct neon_type_el et
= neon_check_type (2, rs
,
18178 N_EQK
| N_INT
, N_8
| N_KEY
);
18179 neon_two_same (neon_quad (rs
), 1, et
.size
);
18185 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18186 neon_two_same (neon_quad (rs
), 1, -1);
18190 do_neon_tbl_tbx (void)
18192 unsigned listlenbits
;
18193 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
18195 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
18197 first_error (_("bad list length for table lookup"));
18201 listlenbits
= inst
.operands
[1].imm
- 1;
18202 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18203 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18204 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18205 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18206 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
18207 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
18208 inst
.instruction
|= listlenbits
<< 8;
18210 neon_dp_fixup (&inst
);
18214 do_neon_ldm_stm (void)
18216 /* P, U and L bits are part of bitmask. */
18217 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
18218 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
18220 if (inst
.operands
[1].issingle
)
18222 do_vfp_nsyn_ldm_stm (is_dbmode
);
18226 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
18227 _("writeback (!) must be used for VLDMDB and VSTMDB"));
18229 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
18230 _("register list must contain at least 1 and at most 16 "
18233 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
18234 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
18235 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
18236 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
18238 inst
.instruction
|= offsetbits
;
18240 do_vfp_cond_or_thumb ();
18244 do_neon_ldr_str (void)
18246 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
18248 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
18249 And is UNPREDICTABLE in thumb mode. */
18251 && inst
.operands
[1].reg
== REG_PC
18252 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
18255 inst
.error
= _("Use of PC here is UNPREDICTABLE");
18256 else if (warn_on_deprecated
)
18257 as_tsktsk (_("Use of PC here is deprecated"));
18260 if (inst
.operands
[0].issingle
)
18263 do_vfp_nsyn_opcode ("flds");
18265 do_vfp_nsyn_opcode ("fsts");
18267 /* ARMv8.2 vldr.16/vstr.16 instruction. */
18268 if (inst
.vectype
.el
[0].size
== 16)
18269 do_scalar_fp16_v82_encode ();
18274 do_vfp_nsyn_opcode ("fldd");
18276 do_vfp_nsyn_opcode ("fstd");
18281 do_t_vldr_vstr_sysreg (void)
18283 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
18284 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
18286 /* Use of PC is UNPREDICTABLE. */
18287 if (inst
.operands
[1].reg
== REG_PC
)
18288 inst
.error
= _("Use of PC here is UNPREDICTABLE");
18290 if (inst
.operands
[1].immisreg
)
18291 inst
.error
= _("instruction does not accept register index");
18293 if (!inst
.operands
[1].isreg
)
18294 inst
.error
= _("instruction does not accept PC-relative addressing");
18296 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
18297 inst
.error
= _("immediate value out of range");
18299 inst
.instruction
= 0xec000f80;
18301 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
18302 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
18303 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
18304 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
18308 do_vldr_vstr (void)
18310 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
18312 /* VLDR/VSTR (System Register). */
18315 if (!mark_feature_used (&arm_ext_v8_1m_main
))
18316 as_bad (_("Instruction not permitted on this architecture"));
18318 do_t_vldr_vstr_sysreg ();
18323 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
18324 as_bad (_("Instruction not permitted on this architecture"));
18325 do_neon_ldr_str ();
18329 /* "interleave" version also handles non-interleaving register VLD1/VST1
18333 do_neon_ld_st_interleave (void)
18335 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
18336 N_8
| N_16
| N_32
| N_64
);
18337 unsigned alignbits
= 0;
18339 /* The bits in this table go:
18340 0: register stride of one (0) or two (1)
18341 1,2: register list length, minus one (1, 2, 3, 4).
18342 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
18343 We use -1 for invalid entries. */
18344 const int typetable
[] =
18346 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
18347 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
18348 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
18349 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
18353 if (et
.type
== NT_invtype
)
18356 if (inst
.operands
[1].immisalign
)
18357 switch (inst
.operands
[1].imm
>> 8)
18359 case 64: alignbits
= 1; break;
18361 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
18362 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
18363 goto bad_alignment
;
18367 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
18368 goto bad_alignment
;
18373 first_error (_("bad alignment"));
18377 inst
.instruction
|= alignbits
<< 4;
18378 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18380 /* Bits [4:6] of the immediate in a list specifier encode register stride
18381 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
18382 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
18383 up the right value for "type" in a table based on this value and the given
18384 list style, then stick it back. */
18385 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
18386 | (((inst
.instruction
>> 8) & 3) << 3);
18388 typebits
= typetable
[idx
];
18390 constraint (typebits
== -1, _("bad list type for instruction"));
18391 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
18394 inst
.instruction
&= ~0xf00;
18395 inst
.instruction
|= typebits
<< 8;
18398 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18399 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18400 otherwise. The variable arguments are a list of pairs of legal (size, align)
18401 values, terminated with -1. */
18404 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
18407 int result
= FAIL
, thissize
, thisalign
;
18409 if (!inst
.operands
[1].immisalign
)
18415 va_start (ap
, do_alignment
);
18419 thissize
= va_arg (ap
, int);
18420 if (thissize
== -1)
18422 thisalign
= va_arg (ap
, int);
18424 if (size
== thissize
&& align
== thisalign
)
18427 while (result
!= SUCCESS
);
18431 if (result
== SUCCESS
)
18434 first_error (_("unsupported alignment for instruction"));
18440 do_neon_ld_st_lane (void)
18442 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18443 int align_good
, do_alignment
= 0;
18444 int logsize
= neon_logbits (et
.size
);
18445 int align
= inst
.operands
[1].imm
>> 8;
18446 int n
= (inst
.instruction
>> 8) & 3;
18447 int max_el
= 64 / et
.size
;
18449 if (et
.type
== NT_invtype
)
18452 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
18453 _("bad list length"));
18454 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
18455 _("scalar index out of range"));
18456 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
18458 _("stride of 2 unavailable when element size is 8"));
18462 case 0: /* VLD1 / VST1. */
18463 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
18465 if (align_good
== FAIL
)
18469 unsigned alignbits
= 0;
18472 case 16: alignbits
= 0x1; break;
18473 case 32: alignbits
= 0x3; break;
18476 inst
.instruction
|= alignbits
<< 4;
18480 case 1: /* VLD2 / VST2. */
18481 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
18482 16, 32, 32, 64, -1);
18483 if (align_good
== FAIL
)
18486 inst
.instruction
|= 1 << 4;
18489 case 2: /* VLD3 / VST3. */
18490 constraint (inst
.operands
[1].immisalign
,
18491 _("can't use alignment with this instruction"));
18494 case 3: /* VLD4 / VST4. */
18495 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18496 16, 64, 32, 64, 32, 128, -1);
18497 if (align_good
== FAIL
)
18501 unsigned alignbits
= 0;
18504 case 8: alignbits
= 0x1; break;
18505 case 16: alignbits
= 0x1; break;
18506 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
18509 inst
.instruction
|= alignbits
<< 4;
18516 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
18517 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18518 inst
.instruction
|= 1 << (4 + logsize
);
18520 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
18521 inst
.instruction
|= logsize
<< 10;
18524 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18527 do_neon_ld_dup (void)
18529 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18530 int align_good
, do_alignment
= 0;
18532 if (et
.type
== NT_invtype
)
18535 switch ((inst
.instruction
>> 8) & 3)
18537 case 0: /* VLD1. */
18538 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
18539 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18540 &do_alignment
, 16, 16, 32, 32, -1);
18541 if (align_good
== FAIL
)
18543 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
18546 case 2: inst
.instruction
|= 1 << 5; break;
18547 default: first_error (_("bad list length")); return;
18549 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18552 case 1: /* VLD2. */
18553 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18554 &do_alignment
, 8, 16, 16, 32, 32, 64,
18556 if (align_good
== FAIL
)
18558 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
18559 _("bad list length"));
18560 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18561 inst
.instruction
|= 1 << 5;
18562 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18565 case 2: /* VLD3. */
18566 constraint (inst
.operands
[1].immisalign
,
18567 _("can't use alignment with this instruction"));
18568 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
18569 _("bad list length"));
18570 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18571 inst
.instruction
|= 1 << 5;
18572 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18575 case 3: /* VLD4. */
18577 int align
= inst
.operands
[1].imm
>> 8;
18578 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18579 16, 64, 32, 64, 32, 128, -1);
18580 if (align_good
== FAIL
)
18582 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
18583 _("bad list length"));
18584 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18585 inst
.instruction
|= 1 << 5;
18586 if (et
.size
== 32 && align
== 128)
18587 inst
.instruction
|= 0x3 << 6;
18589 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18596 inst
.instruction
|= do_alignment
<< 4;
18599 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
18600 apart from bits [11:4]. */
18603 do_neon_ldx_stx (void)
18605 if (inst
.operands
[1].isreg
)
18606 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
18608 switch (NEON_LANE (inst
.operands
[0].imm
))
18610 case NEON_INTERLEAVE_LANES
:
18611 NEON_ENCODE (INTERLV
, inst
);
18612 do_neon_ld_st_interleave ();
18615 case NEON_ALL_LANES
:
18616 NEON_ENCODE (DUP
, inst
);
18617 if (inst
.instruction
== N_INV
)
18619 first_error ("only loads support such operands");
18626 NEON_ENCODE (LANE
, inst
);
18627 do_neon_ld_st_lane ();
18630 /* L bit comes from bit mask. */
18631 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18632 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18633 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
18635 if (inst
.operands
[1].postind
)
18637 int postreg
= inst
.operands
[1].imm
& 0xf;
18638 constraint (!inst
.operands
[1].immisreg
,
18639 _("post-index must be a register"));
18640 constraint (postreg
== 0xd || postreg
== 0xf,
18641 _("bad register for post-index"));
18642 inst
.instruction
|= postreg
;
18646 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
18647 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
18648 || inst
.relocs
[0].exp
.X_add_number
!= 0,
18651 if (inst
.operands
[1].writeback
)
18653 inst
.instruction
|= 0xd;
18656 inst
.instruction
|= 0xf;
18660 inst
.instruction
|= 0xf9000000;
18662 inst
.instruction
|= 0xf4000000;
18667 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
18669 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18670 D register operands. */
18671 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18672 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18675 NEON_ENCODE (FPV8
, inst
);
18677 if (rs
== NS_FFF
|| rs
== NS_HHH
)
18679 do_vfp_sp_dyadic ();
18681 /* ARMv8.2 fp16 instruction. */
18683 do_scalar_fp16_v82_encode ();
18686 do_vfp_dp_rd_rn_rm ();
18689 inst
.instruction
|= 0x100;
18691 inst
.instruction
|= 0xf0000000;
18697 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18699 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
18700 first_error (_("invalid instruction shape"));
18706 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18708 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
18711 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18714 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
18718 do_vrint_1 (enum neon_cvt_mode mode
)
18720 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
18721 struct neon_type_el et
;
18726 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18727 D register operands. */
18728 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18729 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18732 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
18734 if (et
.type
!= NT_invtype
)
18736 /* VFP encodings. */
18737 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
18738 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
18739 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18741 NEON_ENCODE (FPV8
, inst
);
18742 if (rs
== NS_FF
|| rs
== NS_HH
)
18743 do_vfp_sp_monadic ();
18745 do_vfp_dp_rd_rm ();
18749 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
18750 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
18751 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
18752 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
18753 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
18754 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
18755 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
18759 inst
.instruction
|= (rs
== NS_DD
) << 8;
18760 do_vfp_cond_or_thumb ();
18762 /* ARMv8.2 fp16 vrint instruction. */
18764 do_scalar_fp16_v82_encode ();
18768 /* Neon encodings (or something broken...). */
18770 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
18772 if (et
.type
== NT_invtype
)
18775 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18776 NEON_ENCODE (FLOAT
, inst
);
18778 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18781 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18782 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18783 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18784 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18785 inst
.instruction
|= neon_quad (rs
) << 6;
18786 /* Mask off the original size bits and reencode them. */
18787 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
18788 | neon_logbits (et
.size
) << 18);
18792 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
18793 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
18794 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
18795 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
18796 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
18797 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
18798 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
18803 inst
.instruction
|= 0xfc000000;
18805 inst
.instruction
|= 0xf0000000;
18812 do_vrint_1 (neon_cvt_mode_x
);
18818 do_vrint_1 (neon_cvt_mode_z
);
18824 do_vrint_1 (neon_cvt_mode_r
);
18830 do_vrint_1 (neon_cvt_mode_a
);
18836 do_vrint_1 (neon_cvt_mode_n
);
18842 do_vrint_1 (neon_cvt_mode_p
);
18848 do_vrint_1 (neon_cvt_mode_m
);
18852 neon_scalar_for_vcmla (unsigned opnd
, unsigned elsize
)
18854 unsigned regno
= NEON_SCALAR_REG (opnd
);
18855 unsigned elno
= NEON_SCALAR_INDEX (opnd
);
18857 if (elsize
== 16 && elno
< 2 && regno
< 16)
18858 return regno
| (elno
<< 4);
18859 else if (elsize
== 32 && elno
== 0)
18862 first_error (_("scalar out of range"));
18869 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18871 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18872 _("expression too complex"));
18873 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18874 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18875 _("immediate out of range"));
18877 if (inst
.operands
[2].isscalar
)
18879 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18880 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18881 N_KEY
| N_F16
| N_F32
).size
;
18882 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18884 inst
.instruction
= 0xfe000800;
18885 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18886 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18887 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18888 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18889 inst
.instruction
|= LOW4 (m
);
18890 inst
.instruction
|= HI1 (m
) << 5;
18891 inst
.instruction
|= neon_quad (rs
) << 6;
18892 inst
.instruction
|= rot
<< 20;
18893 inst
.instruction
|= (size
== 32) << 23;
18897 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18898 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18899 N_KEY
| N_F16
| N_F32
).size
;
18900 neon_three_same (neon_quad (rs
), 0, -1);
18901 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18902 inst
.instruction
|= 0xfc200800;
18903 inst
.instruction
|= rot
<< 23;
18904 inst
.instruction
|= (size
== 32) << 20;
18911 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18913 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18914 _("expression too complex"));
18915 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18916 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18917 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18918 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18919 N_KEY
| N_F16
| N_F32
).size
;
18920 neon_three_same (neon_quad (rs
), 0, -1);
18921 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18922 inst
.instruction
|= 0xfc800800;
18923 inst
.instruction
|= (rot
== 270) << 24;
18924 inst
.instruction
|= (size
== 32) << 20;
18927 /* Dot Product instructions encoding support. */
18930 do_neon_dotproduct (int unsigned_p
)
18932 enum neon_shape rs
;
18933 unsigned scalar_oprd2
= 0;
18936 if (inst
.cond
!= COND_ALWAYS
)
18937 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18938 "is UNPREDICTABLE"));
18940 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18943 /* Dot Product instructions are in three-same D/Q register format or the third
18944 operand can be a scalar index register. */
18945 if (inst
.operands
[2].isscalar
)
18947 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18948 high8
= 0xfe000000;
18949 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18953 high8
= 0xfc000000;
18954 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18958 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18960 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18962 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18963 Product instruction, so we pass 0 as the "ubit" parameter. And the
18964 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18965 neon_three_same (neon_quad (rs
), 0, 32);
18967 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18968 different NEON three-same encoding. */
18969 inst
.instruction
&= 0x00ffffff;
18970 inst
.instruction
|= high8
;
18971 /* Encode 'U' bit which indicates signedness. */
18972 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18973 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18974 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18975 the instruction encoding. */
18976 if (inst
.operands
[2].isscalar
)
18978 inst
.instruction
&= 0xffffffd0;
18979 inst
.instruction
|= LOW4 (scalar_oprd2
);
18980 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
18984 /* Dot Product instructions for signed integer. */
18987 do_neon_dotproduct_s (void)
18989 return do_neon_dotproduct (0);
18992 /* Dot Product instructions for unsigned integer. */
18995 do_neon_dotproduct_u (void)
18997 return do_neon_dotproduct (1);
19000 /* Crypto v1 instructions. */
19002 do_crypto_2op_1 (unsigned elttype
, int op
)
19004 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19006 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
19012 NEON_ENCODE (INTEGER
, inst
);
19013 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19014 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19015 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19016 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19018 inst
.instruction
|= op
<< 6;
19021 inst
.instruction
|= 0xfc000000;
19023 inst
.instruction
|= 0xf0000000;
19027 do_crypto_3op_1 (int u
, int op
)
19029 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19031 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
19032 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
19037 NEON_ENCODE (INTEGER
, inst
);
19038 neon_three_same (1, u
, 8 << op
);
19044 do_crypto_2op_1 (N_8
, 0);
19050 do_crypto_2op_1 (N_8
, 1);
19056 do_crypto_2op_1 (N_8
, 2);
19062 do_crypto_2op_1 (N_8
, 3);
19068 do_crypto_3op_1 (0, 0);
19074 do_crypto_3op_1 (0, 1);
19080 do_crypto_3op_1 (0, 2);
19086 do_crypto_3op_1 (0, 3);
19092 do_crypto_3op_1 (1, 0);
19098 do_crypto_3op_1 (1, 1);
19102 do_sha256su1 (void)
19104 do_crypto_3op_1 (1, 2);
19110 do_crypto_2op_1 (N_32
, -1);
19116 do_crypto_2op_1 (N_32
, 0);
19120 do_sha256su0 (void)
19122 do_crypto_2op_1 (N_32
, 1);
19126 do_crc32_1 (unsigned int poly
, unsigned int sz
)
19128 unsigned int Rd
= inst
.operands
[0].reg
;
19129 unsigned int Rn
= inst
.operands
[1].reg
;
19130 unsigned int Rm
= inst
.operands
[2].reg
;
19132 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19133 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
19134 inst
.instruction
|= LOW4 (Rn
) << 16;
19135 inst
.instruction
|= LOW4 (Rm
);
19136 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
19137 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
19139 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
19140 as_warn (UNPRED_REG ("r15"));
19182 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
19184 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
19185 do_vfp_sp_dp_cvt ();
19186 do_vfp_cond_or_thumb ();
19190 /* Overall per-instruction processing. */
19192 /* We need to be able to fix up arbitrary expressions in some statements.
19193 This is so that we can handle symbols that are an arbitrary distance from
19194 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
19195 which returns part of an address in a form which will be valid for
19196 a data instruction. We do this by pushing the expression into a symbol
19197 in the expr_section, and creating a fix for that. */
19200 fix_new_arm (fragS
* frag
,
19214 /* Create an absolute valued symbol, so we have something to
19215 refer to in the object file. Unfortunately for us, gas's
19216 generic expression parsing will already have folded out
19217 any use of .set foo/.type foo %function that may have
19218 been used to set type information of the target location,
19219 that's being specified symbolically. We have to presume
19220 the user knows what they are doing. */
19224 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
19226 symbol
= symbol_find_or_make (name
);
19227 S_SET_SEGMENT (symbol
, absolute_section
);
19228 symbol_set_frag (symbol
, &zero_address_frag
);
19229 S_SET_VALUE (symbol
, exp
->X_add_number
);
19230 exp
->X_op
= O_symbol
;
19231 exp
->X_add_symbol
= symbol
;
19232 exp
->X_add_number
= 0;
19238 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
19239 (enum bfd_reloc_code_real
) reloc
);
19243 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
19244 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
19248 /* Mark whether the fix is to a THUMB instruction, or an ARM
19250 new_fix
->tc_fix_data
= thumb_mode
;
19253 /* Create a frg for an instruction requiring relaxation. */
19255 output_relax_insn (void)
19261 /* The size of the instruction is unknown, so tie the debug info to the
19262 start of the instruction. */
19263 dwarf2_emit_insn (0);
19265 switch (inst
.relocs
[0].exp
.X_op
)
19268 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
19269 offset
= inst
.relocs
[0].exp
.X_add_number
;
19273 offset
= inst
.relocs
[0].exp
.X_add_number
;
19276 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
19280 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
19281 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
19282 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
19285 /* Write a 32-bit thumb instruction to buf. */
19287 put_thumb32_insn (char * buf
, unsigned long insn
)
19289 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
19290 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
19294 output_inst (const char * str
)
19300 as_bad ("%s -- `%s'", inst
.error
, str
);
19305 output_relax_insn ();
19308 if (inst
.size
== 0)
19311 to
= frag_more (inst
.size
);
19312 /* PR 9814: Record the thumb mode into the current frag so that we know
19313 what type of NOP padding to use, if necessary. We override any previous
19314 setting so that if the mode has changed then the NOPS that we use will
19315 match the encoding of the last instruction in the frag. */
19316 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
19318 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
19320 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
19321 put_thumb32_insn (to
, inst
.instruction
);
19323 else if (inst
.size
> INSN_SIZE
)
19325 gas_assert (inst
.size
== (2 * INSN_SIZE
));
19326 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
19327 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
19330 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
19333 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19335 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
19336 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
19337 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
19338 inst
.relocs
[r
].type
);
19341 dwarf2_emit_insn (inst
.size
);
19345 output_it_inst (int cond
, int mask
, char * to
)
19347 unsigned long instruction
= 0xbf00;
19350 instruction
|= mask
;
19351 instruction
|= cond
<< 4;
19355 to
= frag_more (2);
19357 dwarf2_emit_insn (2);
19361 md_number_to_chars (to
, instruction
, 2);
19366 /* Tag values used in struct asm_opcode's tag field. */
19369 OT_unconditional
, /* Instruction cannot be conditionalized.
19370 The ARM condition field is still 0xE. */
19371 OT_unconditionalF
, /* Instruction cannot be conditionalized
19372 and carries 0xF in its ARM condition field. */
19373 OT_csuffix
, /* Instruction takes a conditional suffix. */
19374 OT_csuffixF
, /* Some forms of the instruction take a scalar
19375 conditional suffix, others place 0xF where the
19376 condition field would be, others take a vector
19377 conditional suffix. */
19378 OT_cinfix3
, /* Instruction takes a conditional infix,
19379 beginning at character index 3. (In
19380 unified mode, it becomes a suffix.) */
19381 OT_cinfix3_deprecated
, /* The same as OT_cinfix3. This is used for
19382 tsts, cmps, cmns, and teqs. */
19383 OT_cinfix3_legacy
, /* Legacy instruction takes a conditional infix at
19384 character index 3, even in unified mode. Used for
19385 legacy instructions where suffix and infix forms
19386 may be ambiguous. */
19387 OT_csuf_or_in3
, /* Instruction takes either a conditional
19388 suffix or an infix at character index 3. */
19389 OT_odd_infix_unc
, /* This is the unconditional variant of an
19390 instruction that takes a conditional infix
19391 at an unusual position. In unified mode,
19392 this variant will accept a suffix. */
19393 OT_odd_infix_0
/* Values greater than or equal to OT_odd_infix_0
19394 are the conditional variants of instructions that
19395 take conditional infixes in unusual positions.
19396 The infix appears at character index
19397 (tag - OT_odd_infix_0). These are not accepted
19398 in unified mode. */
19401 /* Subroutine of md_assemble, responsible for looking up the primary
19402 opcode from the mnemonic the user wrote. STR points to the
19403 beginning of the mnemonic.
19405 This is not simply a hash table lookup, because of conditional
19406 variants. Most instructions have conditional variants, which are
19407 expressed with a _conditional affix_ to the mnemonic. If we were
19408 to encode each conditional variant as a literal string in the opcode
19409 table, it would have approximately 20,000 entries.
19411 Most mnemonics take this affix as a suffix, and in unified syntax,
19412 'most' is upgraded to 'all'. However, in the divided syntax, some
19413 instructions take the affix as an infix, notably the s-variants of
19414 the arithmetic instructions. Of those instructions, all but six
19415 have the infix appear after the third character of the mnemonic.
19417 Accordingly, the algorithm for looking up primary opcodes given
19420 1. Look up the identifier in the opcode table.
19421 If we find a match, go to step U.
19423 2. Look up the last two characters of the identifier in the
19424 conditions table. If we find a match, look up the first N-2
19425 characters of the identifier in the opcode table. If we
19426 find a match, go to step CE.
19428 3. Look up the fourth and fifth characters of the identifier in
19429 the conditions table. If we find a match, extract those
19430 characters from the identifier, and look up the remaining
19431 characters in the opcode table. If we find a match, go
19436 U. Examine the tag field of the opcode structure, in case this is
19437 one of the six instructions with its conditional infix in an
19438 unusual place. If it is, the tag tells us where to find the
19439 infix; look it up in the conditions table and set inst.cond
19440 accordingly. Otherwise, this is an unconditional instruction.
19441 Again set inst.cond accordingly. Return the opcode structure.
19443 CE. Examine the tag field to make sure this is an instruction that
19444 should receive a conditional suffix. If it is not, fail.
19445 Otherwise, set inst.cond from the suffix we already looked up,
19446 and return the opcode structure.
19448 CM. Examine the tag field to make sure this is an instruction that
19449 should receive a conditional infix after the third character.
19450 If it is not, fail. Otherwise, undo the edits to the current
19451 line of input and proceed as for case CE. */
19453 static const struct asm_opcode
*
19454 opcode_lookup (char **str
)
19458 const struct asm_opcode
*opcode
;
19459 const struct asm_cond
*cond
;
19462 /* Scan up to the end of the mnemonic, which must end in white space,
19463 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
19464 for (base
= end
= *str
; *end
!= '\0'; end
++)
19465 if (*end
== ' ' || *end
== '.')
19471 /* Handle a possible width suffix and/or Neon type suffix. */
19476 /* The .w and .n suffixes are only valid if the unified syntax is in
19478 if (unified_syntax
&& end
[1] == 'w')
19480 else if (unified_syntax
&& end
[1] == 'n')
19485 inst
.vectype
.elems
= 0;
19487 *str
= end
+ offset
;
19489 if (end
[offset
] == '.')
19491 /* See if we have a Neon type suffix (possible in either unified or
19492 non-unified ARM syntax mode). */
19493 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
19496 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
19502 /* Look for unaffixed or special-case affixed mnemonic. */
19503 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19508 if (opcode
->tag
< OT_odd_infix_0
)
19510 inst
.cond
= COND_ALWAYS
;
19514 if (warn_on_deprecated
&& unified_syntax
)
19515 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19516 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
19517 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19520 inst
.cond
= cond
->value
;
19523 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19525 /* Cannot have a conditional suffix on a mnemonic of less than a character.
19527 if (end
- base
< 2)
19530 cond
= (const struct asm_cond
*) hash_find_n (arm_vcond_hsh
, affix
, 1);
19531 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19533 /* If this opcode can not be vector predicated then don't accept it with a
19534 vector predication code. */
19535 if (opcode
&& !opcode
->mayBeVecPred
)
19538 if (!opcode
|| !cond
)
19540 /* Cannot have a conditional suffix on a mnemonic of less than two
19542 if (end
- base
< 3)
19545 /* Look for suffixed mnemonic. */
19547 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19548 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19552 if (opcode
&& cond
)
19555 switch (opcode
->tag
)
19557 case OT_cinfix3_legacy
:
19558 /* Ignore conditional suffixes matched on infix only mnemonics. */
19562 case OT_cinfix3_deprecated
:
19563 case OT_odd_infix_unc
:
19564 if (!unified_syntax
)
19566 /* Fall through. */
19570 case OT_csuf_or_in3
:
19571 inst
.cond
= cond
->value
;
19574 case OT_unconditional
:
19575 case OT_unconditionalF
:
19577 inst
.cond
= cond
->value
;
19580 /* Delayed diagnostic. */
19581 inst
.error
= BAD_COND
;
19582 inst
.cond
= COND_ALWAYS
;
19591 /* Cannot have a usual-position infix on a mnemonic of less than
19592 six characters (five would be a suffix). */
19593 if (end
- base
< 6)
19596 /* Look for infixed mnemonic in the usual position. */
19598 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19602 memcpy (save
, affix
, 2);
19603 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
19604 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19606 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
19607 memcpy (affix
, save
, 2);
19610 && (opcode
->tag
== OT_cinfix3
19611 || opcode
->tag
== OT_cinfix3_deprecated
19612 || opcode
->tag
== OT_csuf_or_in3
19613 || opcode
->tag
== OT_cinfix3_legacy
))
19616 if (warn_on_deprecated
&& unified_syntax
19617 && (opcode
->tag
== OT_cinfix3
19618 || opcode
->tag
== OT_cinfix3_deprecated
))
19619 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19621 inst
.cond
= cond
->value
;
19628 /* This function generates an initial IT instruction, leaving its block
19629 virtually open for the new instructions. Eventually,
19630 the mask will be updated by now_pred_add_mask () each time
19631 a new instruction needs to be included in the IT block.
19632 Finally, the block is closed with close_automatic_it_block ().
19633 The block closure can be requested either from md_assemble (),
19634 a tencode (), or due to a label hook. */
19637 new_automatic_it_block (int cond
)
19639 now_pred
.state
= AUTOMATIC_PRED_BLOCK
;
19640 now_pred
.mask
= 0x18;
19641 now_pred
.cc
= cond
;
19642 now_pred
.block_length
= 1;
19643 mapping_state (MAP_THUMB
);
19644 now_pred
.insn
= output_it_inst (cond
, now_pred
.mask
, NULL
);
19645 now_pred
.warn_deprecated
= FALSE
;
19646 now_pred
.insn_cond
= TRUE
;
19649 /* Close an automatic IT block.
19650 See comments in new_automatic_it_block (). */
19653 close_automatic_it_block (void)
19655 now_pred
.mask
= 0x10;
19656 now_pred
.block_length
= 0;
19659 /* Update the mask of the current automatically-generated IT
19660 instruction. See comments in new_automatic_it_block (). */
19663 now_pred_add_mask (int cond
)
19665 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19666 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19667 | ((bitvalue) << (nbit)))
19668 const int resulting_bit
= (cond
& 1);
19670 now_pred
.mask
&= 0xf;
19671 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19673 (5 - now_pred
.block_length
));
19674 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19676 ((5 - now_pred
.block_length
) - 1));
19677 output_it_inst (now_pred
.cc
, now_pred
.mask
, now_pred
.insn
);
19680 #undef SET_BIT_VALUE
19683 /* The IT blocks handling machinery is accessed through the these functions:
19684 it_fsm_pre_encode () from md_assemble ()
19685 set_pred_insn_type () optional, from the tencode functions
19686 set_pred_insn_type_last () ditto
19687 in_pred_block () ditto
19688 it_fsm_post_encode () from md_assemble ()
19689 force_automatic_it_block_close () from label handling functions
19692 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19693 initializing the IT insn type with a generic initial value depending
19694 on the inst.condition.
19695 2) During the tencode function, two things may happen:
19696 a) The tencode function overrides the IT insn type by
19697 calling either set_pred_insn_type (type) or
19698 set_pred_insn_type_last ().
19699 b) The tencode function queries the IT block state by
19700 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19702 Both set_pred_insn_type and in_pred_block run the internal FSM state
19703 handling function (handle_pred_state), because: a) setting the IT insn
19704 type may incur in an invalid state (exiting the function),
19705 and b) querying the state requires the FSM to be updated.
19706 Specifically we want to avoid creating an IT block for conditional
19707 branches, so it_fsm_pre_encode is actually a guess and we can't
19708 determine whether an IT block is required until the tencode () routine
19709 has decided what type of instruction this actually it.
19710 Because of this, if set_pred_insn_type and in_pred_block have to be
19711 used, set_pred_insn_type has to be called first.
19713 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19714 that determines the insn IT type depending on the inst.cond code.
19715 When a tencode () routine encodes an instruction that can be
19716 either outside an IT block, or, in the case of being inside, has to be
19717 the last one, set_pred_insn_type_last () will determine the proper
19718 IT instruction type based on the inst.cond code. Otherwise,
19719 set_pred_insn_type can be called for overriding that logic or
19720 for covering other cases.
19722 Calling handle_pred_state () may not transition the IT block state to
19723 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19724 still queried. Instead, if the FSM determines that the state should
19725 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19726 after the tencode () function: that's what it_fsm_post_encode () does.
19728 Since in_pred_block () calls the state handling function to get an
19729 updated state, an error may occur (due to invalid insns combination).
19730 In that case, inst.error is set.
19731 Therefore, inst.error has to be checked after the execution of
19732 the tencode () routine.
19734 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19735 any pending state change (if any) that didn't take place in
19736 handle_pred_state () as explained above. */
19739 it_fsm_pre_encode (void)
19741 if (inst
.cond
!= COND_ALWAYS
)
19742 inst
.pred_insn_type
= INSIDE_IT_INSN
;
19744 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
19746 now_pred
.state_handled
= 0;
19749 /* IT state FSM handling function. */
19750 /* MVE instructions and non-MVE instructions are handled differently because of
19751 the introduction of VPT blocks.
19752 Specifications say that any non-MVE instruction inside a VPT block is
19753 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19754 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19755 few exceptions we have MVE_UNPREDICABLE_INSN.
19756 The error messages provided depending on the different combinations possible
19757 are described in the cases below:
19758 For 'most' MVE instructions:
19759 1) In an IT block, with an IT code: syntax error
19760 2) In an IT block, with a VPT code: error: must be in a VPT block
19761 3) In an IT block, with no code: warning: UNPREDICTABLE
19762 4) In a VPT block, with an IT code: syntax error
19763 5) In a VPT block, with a VPT code: OK!
19764 6) In a VPT block, with no code: error: missing code
19765 7) Outside a pred block, with an IT code: error: syntax error
19766 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19767 9) Outside a pred block, with no code: OK!
19768 For non-MVE instructions:
19769 10) In an IT block, with an IT code: OK!
19770 11) In an IT block, with a VPT code: syntax error
19771 12) In an IT block, with no code: error: missing code
19772 13) In a VPT block, with an IT code: error: should be in an IT block
19773 14) In a VPT block, with a VPT code: syntax error
19774 15) In a VPT block, with no code: UNPREDICTABLE
19775 16) Outside a pred block, with an IT code: error: should be in an IT block
19776 17) Outside a pred block, with a VPT code: syntax error
19777 18) Outside a pred block, with no code: OK!
19782 handle_pred_state (void)
19784 now_pred
.state_handled
= 1;
19785 now_pred
.insn_cond
= FALSE
;
19787 switch (now_pred
.state
)
19789 case OUTSIDE_PRED_BLOCK
:
19790 switch (inst
.pred_insn_type
)
19792 case MVE_UNPREDICABLE_INSN
:
19793 case MVE_OUTSIDE_PRED_INSN
:
19794 if (inst
.cond
< COND_ALWAYS
)
19796 /* Case 7: Outside a pred block, with an IT code: error: syntax
19798 inst
.error
= BAD_SYNTAX
;
19801 /* Case 9: Outside a pred block, with no code: OK! */
19803 case OUTSIDE_PRED_INSN
:
19804 if (inst
.cond
> COND_ALWAYS
)
19806 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19808 inst
.error
= BAD_SYNTAX
;
19811 /* Case 18: Outside a pred block, with no code: OK! */
19814 case INSIDE_VPT_INSN
:
19815 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19817 inst
.error
= BAD_OUT_VPT
;
19820 case INSIDE_IT_INSN
:
19821 case INSIDE_IT_LAST_INSN
:
19822 if (inst
.cond
< COND_ALWAYS
)
19824 /* Case 16: Outside a pred block, with an IT code: error: should
19825 be in an IT block. */
19826 if (thumb_mode
== 0)
19829 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
19830 as_tsktsk (_("Warning: conditional outside an IT block"\
19835 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
19836 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
19838 /* Automatically generate the IT instruction. */
19839 new_automatic_it_block (inst
.cond
);
19840 if (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
)
19841 close_automatic_it_block ();
19845 inst
.error
= BAD_OUT_IT
;
19851 else if (inst
.cond
> COND_ALWAYS
)
19853 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19855 inst
.error
= BAD_SYNTAX
;
19860 case IF_INSIDE_IT_LAST_INSN
:
19861 case NEUTRAL_IT_INSN
:
19865 if (inst
.cond
!= COND_ALWAYS
)
19866 first_error (BAD_SYNTAX
);
19867 now_pred
.state
= MANUAL_PRED_BLOCK
;
19868 now_pred
.block_length
= 0;
19869 now_pred
.type
= VECTOR_PRED
;
19873 now_pred
.state
= MANUAL_PRED_BLOCK
;
19874 now_pred
.block_length
= 0;
19875 now_pred
.type
= SCALAR_PRED
;
19880 case AUTOMATIC_PRED_BLOCK
:
19881 /* Three things may happen now:
19882 a) We should increment current it block size;
19883 b) We should close current it block (closing insn or 4 insns);
19884 c) We should close current it block and start a new one (due
19885 to incompatible conditions or
19886 4 insns-length block reached). */
19888 switch (inst
.pred_insn_type
)
19890 case INSIDE_VPT_INSN
:
19892 case MVE_UNPREDICABLE_INSN
:
19893 case MVE_OUTSIDE_PRED_INSN
:
19895 case OUTSIDE_PRED_INSN
:
19896 /* The closure of the block shall happen immediately,
19897 so any in_pred_block () call reports the block as closed. */
19898 force_automatic_it_block_close ();
19901 case INSIDE_IT_INSN
:
19902 case INSIDE_IT_LAST_INSN
:
19903 case IF_INSIDE_IT_LAST_INSN
:
19904 now_pred
.block_length
++;
19906 if (now_pred
.block_length
> 4
19907 || !now_pred_compatible (inst
.cond
))
19909 force_automatic_it_block_close ();
19910 if (inst
.pred_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
19911 new_automatic_it_block (inst
.cond
);
19915 now_pred
.insn_cond
= TRUE
;
19916 now_pred_add_mask (inst
.cond
);
19919 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
19920 && (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
19921 || inst
.pred_insn_type
== IF_INSIDE_IT_LAST_INSN
))
19922 close_automatic_it_block ();
19925 case NEUTRAL_IT_INSN
:
19926 now_pred
.block_length
++;
19927 now_pred
.insn_cond
= TRUE
;
19929 if (now_pred
.block_length
> 4)
19930 force_automatic_it_block_close ();
19932 now_pred_add_mask (now_pred
.cc
& 1);
19936 close_automatic_it_block ();
19937 now_pred
.state
= MANUAL_PRED_BLOCK
;
19942 case MANUAL_PRED_BLOCK
:
19945 if (now_pred
.type
== SCALAR_PRED
)
19947 /* Check conditional suffixes. */
19948 cond
= now_pred
.cc
^ ((now_pred
.mask
>> 4) & 1) ^ 1;
19949 now_pred
.mask
<<= 1;
19950 now_pred
.mask
&= 0x1f;
19951 is_last
= (now_pred
.mask
== 0x10);
19955 now_pred
.cc
^= (now_pred
.mask
>> 4);
19956 cond
= now_pred
.cc
+ 0xf;
19957 now_pred
.mask
<<= 1;
19958 now_pred
.mask
&= 0x1f;
19959 is_last
= now_pred
.mask
== 0x10;
19961 now_pred
.insn_cond
= TRUE
;
19963 switch (inst
.pred_insn_type
)
19965 case OUTSIDE_PRED_INSN
:
19966 if (now_pred
.type
== SCALAR_PRED
)
19968 if (inst
.cond
== COND_ALWAYS
)
19970 /* Case 12: In an IT block, with no code: error: missing
19972 inst
.error
= BAD_NOT_IT
;
19975 else if (inst
.cond
> COND_ALWAYS
)
19977 /* Case 11: In an IT block, with a VPT code: syntax error.
19979 inst
.error
= BAD_SYNTAX
;
19982 else if (thumb_mode
)
19984 /* This is for some special cases where a non-MVE
19985 instruction is not allowed in an IT block, such as cbz,
19986 but are put into one with a condition code.
19987 You could argue this should be a syntax error, but we
19988 gave the 'not allowed in IT block' diagnostic in the
19989 past so we will keep doing so. */
19990 inst
.error
= BAD_NOT_IT
;
19997 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
19998 as_tsktsk (MVE_NOT_VPT
);
20001 case MVE_OUTSIDE_PRED_INSN
:
20002 if (now_pred
.type
== SCALAR_PRED
)
20004 if (inst
.cond
== COND_ALWAYS
)
20006 /* Case 3: In an IT block, with no code: warning:
20008 as_tsktsk (MVE_NOT_IT
);
20011 else if (inst
.cond
< COND_ALWAYS
)
20013 /* Case 1: In an IT block, with an IT code: syntax error.
20015 inst
.error
= BAD_SYNTAX
;
20023 if (inst
.cond
< COND_ALWAYS
)
20025 /* Case 4: In a VPT block, with an IT code: syntax error.
20027 inst
.error
= BAD_SYNTAX
;
20030 else if (inst
.cond
== COND_ALWAYS
)
20032 /* Case 6: In a VPT block, with no code: error: missing
20034 inst
.error
= BAD_NOT_VPT
;
20042 case MVE_UNPREDICABLE_INSN
:
20043 as_tsktsk (now_pred
.type
== SCALAR_PRED
? MVE_NOT_IT
: MVE_NOT_VPT
);
20045 case INSIDE_IT_INSN
:
20046 if (inst
.cond
> COND_ALWAYS
)
20048 /* Case 11: In an IT block, with a VPT code: syntax error. */
20049 /* Case 14: In a VPT block, with a VPT code: syntax error. */
20050 inst
.error
= BAD_SYNTAX
;
20053 else if (now_pred
.type
== SCALAR_PRED
)
20055 /* Case 10: In an IT block, with an IT code: OK! */
20056 if (cond
!= inst
.cond
)
20058 inst
.error
= now_pred
.type
== SCALAR_PRED
? BAD_IT_COND
:
20065 /* Case 13: In a VPT block, with an IT code: error: should be
20067 inst
.error
= BAD_OUT_IT
;
20072 case INSIDE_VPT_INSN
:
20073 if (now_pred
.type
== SCALAR_PRED
)
20075 /* Case 2: In an IT block, with a VPT code: error: must be in a
20077 inst
.error
= BAD_OUT_VPT
;
20080 /* Case 5: In a VPT block, with a VPT code: OK! */
20081 else if (cond
!= inst
.cond
)
20083 inst
.error
= BAD_VPT_COND
;
20087 case INSIDE_IT_LAST_INSN
:
20088 case IF_INSIDE_IT_LAST_INSN
:
20089 if (now_pred
.type
== VECTOR_PRED
|| inst
.cond
> COND_ALWAYS
)
20091 /* Case 4: In a VPT block, with an IT code: syntax error. */
20092 /* Case 11: In an IT block, with a VPT code: syntax error. */
20093 inst
.error
= BAD_SYNTAX
;
20096 else if (cond
!= inst
.cond
)
20098 inst
.error
= BAD_IT_COND
;
20103 inst
.error
= BAD_BRANCH
;
20108 case NEUTRAL_IT_INSN
:
20109 /* The BKPT instruction is unconditional even in a IT or VPT
20114 if (now_pred
.type
== SCALAR_PRED
)
20116 inst
.error
= BAD_IT_IT
;
20119 /* fall through. */
20121 if (inst
.cond
== COND_ALWAYS
)
20123 /* Executing a VPT/VPST instruction inside an IT block or a
20124 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
20126 if (now_pred
.type
== SCALAR_PRED
)
20127 as_tsktsk (MVE_NOT_IT
);
20129 as_tsktsk (MVE_NOT_VPT
);
20134 /* VPT/VPST do not accept condition codes. */
20135 inst
.error
= BAD_SYNTAX
;
20146 struct depr_insn_mask
20148 unsigned long pattern
;
20149 unsigned long mask
;
20150 const char* description
;
20153 /* List of 16-bit instruction patterns deprecated in an IT block in
20155 static const struct depr_insn_mask depr_it_insns
[] = {
20156 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
20157 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
20158 { 0xa000, 0xb800, N_("ADR") },
20159 { 0x4800, 0xf800, N_("Literal loads") },
20160 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
20161 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
20162 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
20163 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
20164 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
20169 it_fsm_post_encode (void)
20173 if (!now_pred
.state_handled
)
20174 handle_pred_state ();
20176 if (now_pred
.insn_cond
20177 && !now_pred
.warn_deprecated
20178 && warn_on_deprecated
20179 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
20180 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
20182 if (inst
.instruction
>= 0x10000)
20184 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
20185 "performance deprecated in ARMv8-A and ARMv8-R"));
20186 now_pred
.warn_deprecated
= TRUE
;
20190 const struct depr_insn_mask
*p
= depr_it_insns
;
20192 while (p
->mask
!= 0)
20194 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
20196 as_tsktsk (_("IT blocks containing 16-bit Thumb "
20197 "instructions of the following class are "
20198 "performance deprecated in ARMv8-A and "
20199 "ARMv8-R: %s"), p
->description
);
20200 now_pred
.warn_deprecated
= TRUE
;
20208 if (now_pred
.block_length
> 1)
20210 as_tsktsk (_("IT blocks containing more than one conditional "
20211 "instruction are performance deprecated in ARMv8-A and "
20213 now_pred
.warn_deprecated
= TRUE
;
20217 is_last
= (now_pred
.mask
== 0x10);
20220 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
20226 force_automatic_it_block_close (void)
20228 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
20230 close_automatic_it_block ();
20231 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
20237 in_pred_block (void)
20239 if (!now_pred
.state_handled
)
20240 handle_pred_state ();
20242 return now_pred
.state
!= OUTSIDE_PRED_BLOCK
;
20245 /* Whether OPCODE only has T32 encoding. Since this function is only used by
20246 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
20247 here, hence the "known" in the function name. */
20250 known_t32_only_insn (const struct asm_opcode
*opcode
)
20252 /* Original Thumb-1 wide instruction. */
20253 if (opcode
->tencode
== do_t_blx
20254 || opcode
->tencode
== do_t_branch23
20255 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
20256 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
20259 /* Wide-only instruction added to ARMv8-M Baseline. */
20260 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
20261 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
20262 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
20263 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
20269 /* Whether wide instruction variant can be used if available for a valid OPCODE
20273 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
20275 if (known_t32_only_insn (opcode
))
20278 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
20279 of variant T3 of B.W is checked in do_t_branch. */
20280 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
20281 && opcode
->tencode
== do_t_branch
)
20284 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
20285 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
20286 && opcode
->tencode
== do_t_mov_cmp
20287 /* Make sure CMP instruction is not affected. */
20288 && opcode
->aencode
== do_mov
)
20291 /* Wide instruction variants of all instructions with narrow *and* wide
20292 variants become available with ARMv6t2. Other opcodes are either
20293 narrow-only or wide-only and are thus available if OPCODE is valid. */
20294 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
20297 /* OPCODE with narrow only instruction variant or wide variant not
20303 md_assemble (char *str
)
20306 const struct asm_opcode
* opcode
;
20308 /* Align the previous label if needed. */
20309 if (last_label_seen
!= NULL
)
20311 symbol_set_frag (last_label_seen
, frag_now
);
20312 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
20313 S_SET_SEGMENT (last_label_seen
, now_seg
);
20316 memset (&inst
, '\0', sizeof (inst
));
20318 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
20319 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
20321 opcode
= opcode_lookup (&p
);
20324 /* It wasn't an instruction, but it might be a register alias of
20325 the form alias .req reg, or a Neon .dn/.qn directive. */
20326 if (! create_register_alias (str
, p
)
20327 && ! create_neon_reg_alias (str
, p
))
20328 as_bad (_("bad instruction `%s'"), str
);
20333 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
20334 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
20336 /* The value which unconditional instructions should have in place of the
20337 condition field. */
20338 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
20342 arm_feature_set variant
;
20344 variant
= cpu_variant
;
20345 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
20346 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
20347 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
20348 /* Check that this instruction is supported for this CPU. */
20349 if (!opcode
->tvariant
20350 || (thumb_mode
== 1
20351 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
20353 if (opcode
->tencode
== do_t_swi
)
20354 as_bad (_("SVC is not permitted on this architecture"));
20356 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
20359 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
20360 && opcode
->tencode
!= do_t_branch
)
20362 as_bad (_("Thumb does not support conditional execution"));
20366 /* Two things are addressed here:
20367 1) Implicit require narrow instructions on Thumb-1.
20368 This avoids relaxation accidentally introducing Thumb-2
20370 2) Reject wide instructions in non Thumb-2 cores.
20372 Only instructions with narrow and wide variants need to be handled
20373 but selecting all non wide-only instructions is easier. */
20374 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
20375 && !t32_insn_ok (variant
, opcode
))
20377 if (inst
.size_req
== 0)
20379 else if (inst
.size_req
== 4)
20381 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
20382 as_bad (_("selected processor does not support 32bit wide "
20383 "variant of instruction `%s'"), str
);
20385 as_bad (_("selected processor does not support `%s' in "
20386 "Thumb-2 mode"), str
);
20391 inst
.instruction
= opcode
->tvalue
;
20393 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
20395 /* Prepare the pred_insn_type for those encodings that don't set
20397 it_fsm_pre_encode ();
20399 opcode
->tencode ();
20401 it_fsm_post_encode ();
20404 if (!(inst
.error
|| inst
.relax
))
20406 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
20407 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
20408 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
20410 as_bad (_("cannot honor width suffix -- `%s'"), str
);
20415 /* Something has gone badly wrong if we try to relax a fixed size
20417 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
20419 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
20420 *opcode
->tvariant
);
20421 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
20422 set those bits when Thumb-2 32-bit instructions are seen. The impact
20423 of relaxable instructions will be considered later after we finish all
20425 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
20426 variant
= arm_arch_none
;
20428 variant
= cpu_variant
;
20429 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
20430 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
20433 check_neon_suffixes
;
20437 mapping_state (MAP_THUMB
);
20440 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
20444 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
20445 is_bx
= (opcode
->aencode
== do_bx
);
20447 /* Check that this instruction is supported for this CPU. */
20448 if (!(is_bx
&& fix_v4bx
)
20449 && !(opcode
->avariant
&&
20450 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
20452 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
20457 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
20461 inst
.instruction
= opcode
->avalue
;
20462 if (opcode
->tag
== OT_unconditionalF
)
20463 inst
.instruction
|= 0xFU
<< 28;
20465 inst
.instruction
|= inst
.cond
<< 28;
20466 inst
.size
= INSN_SIZE
;
20467 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
20469 it_fsm_pre_encode ();
20470 opcode
->aencode ();
20471 it_fsm_post_encode ();
20473 /* Arm mode bx is marked as both v4T and v5 because it's still required
20474 on a hypothetical non-thumb v5 core. */
20476 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
20478 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
20479 *opcode
->avariant
);
20481 check_neon_suffixes
;
20485 mapping_state (MAP_ARM
);
20490 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
20498 check_pred_blocks_finished (void)
20503 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
20504 if (seg_info (sect
)->tc_segment_info_data
.current_pred
.state
20505 == MANUAL_PRED_BLOCK
)
20507 if (now_pred
.type
== SCALAR_PRED
)
20508 as_warn (_("section '%s' finished with an open IT block."),
20511 as_warn (_("section '%s' finished with an open VPT/VPST block."),
20515 if (now_pred
.state
== MANUAL_PRED_BLOCK
)
20517 if (now_pred
.type
== SCALAR_PRED
)
20518 as_warn (_("file finished with an open IT block."));
20520 as_warn (_("file finished with an open VPT/VPST block."));
20525 /* Various frobbings of labels and their addresses. */
20528 arm_start_line_hook (void)
20530 last_label_seen
= NULL
;
20534 arm_frob_label (symbolS
* sym
)
20536 last_label_seen
= sym
;
20538 ARM_SET_THUMB (sym
, thumb_mode
);
20540 #if defined OBJ_COFF || defined OBJ_ELF
20541 ARM_SET_INTERWORK (sym
, support_interwork
);
20544 force_automatic_it_block_close ();
20546 /* Note - do not allow local symbols (.Lxxx) to be labelled
20547 as Thumb functions. This is because these labels, whilst
20548 they exist inside Thumb code, are not the entry points for
20549 possible ARM->Thumb calls. Also, these labels can be used
20550 as part of a computed goto or switch statement. eg gcc
20551 can generate code that looks like this:
20553 ldr r2, [pc, .Laaa]
20563 The first instruction loads the address of the jump table.
20564 The second instruction converts a table index into a byte offset.
20565 The third instruction gets the jump address out of the table.
20566 The fourth instruction performs the jump.
20568 If the address stored at .Laaa is that of a symbol which has the
20569 Thumb_Func bit set, then the linker will arrange for this address
20570 to have the bottom bit set, which in turn would mean that the
20571 address computation performed by the third instruction would end
20572 up with the bottom bit set. Since the ARM is capable of unaligned
20573 word loads, the instruction would then load the incorrect address
20574 out of the jump table, and chaos would ensue. */
20575 if (label_is_thumb_function_name
20576 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
20577 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
20579 /* When the address of a Thumb function is taken the bottom
20580 bit of that address should be set. This will allow
20581 interworking between Arm and Thumb functions to work
20584 THUMB_SET_FUNC (sym
, 1);
20586 label_is_thumb_function_name
= FALSE
;
20589 dwarf2_emit_label (sym
);
20593 arm_data_in_code (void)
20595 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
20597 *input_line_pointer
= '/';
20598 input_line_pointer
+= 5;
20599 *input_line_pointer
= 0;
20607 arm_canonicalize_symbol_name (char * name
)
20611 if (thumb_mode
&& (len
= strlen (name
)) > 5
20612 && streq (name
+ len
- 5, "/data"))
20613 *(name
+ len
- 5) = 0;
20618 /* Table of all register names defined by default. The user can
20619 define additional names with .req. Note that all register names
20620 should appear in both upper and lowercase variants. Some registers
20621 also have mixed-case names. */
20623 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
20624 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
20625 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
20626 #define REGSET(p,t) \
20627 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
20628 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
20629 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
20630 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
20631 #define REGSETH(p,t) \
20632 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
20633 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
20634 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
20635 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
20636 #define REGSET2(p,t) \
20637 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
20638 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
20639 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
20640 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
20641 #define SPLRBANK(base,bank,t) \
20642 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
20643 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
20644 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
20645 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
20646 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
20647 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
20649 static const struct reg_entry reg_names
[] =
20651 /* ARM integer registers. */
20652 REGSET(r
, RN
), REGSET(R
, RN
),
20654 /* ATPCS synonyms. */
20655 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
20656 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
20657 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
20659 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
20660 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
20661 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
20663 /* Well-known aliases. */
20664 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
20665 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
20667 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
20668 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
20670 /* Coprocessor numbers. */
20671 REGSET(p
, CP
), REGSET(P
, CP
),
20673 /* Coprocessor register numbers. The "cr" variants are for backward
20675 REGSET(c
, CN
), REGSET(C
, CN
),
20676 REGSET(cr
, CN
), REGSET(CR
, CN
),
20678 /* ARM banked registers. */
20679 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
20680 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
20681 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
20682 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
20683 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
20684 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
20685 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
20687 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
20688 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
20689 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
20690 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
20691 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
20692 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
20693 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
20694 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
20696 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
20697 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
20698 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
20699 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
20700 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
20701 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
20702 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
20703 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20704 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20706 /* FPA registers. */
20707 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
20708 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
20710 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
20711 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
20713 /* VFP SP registers. */
20714 REGSET(s
,VFS
), REGSET(S
,VFS
),
20715 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
20717 /* VFP DP Registers. */
20718 REGSET(d
,VFD
), REGSET(D
,VFD
),
20719 /* Extra Neon DP registers. */
20720 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
20722 /* Neon QP registers. */
20723 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
20725 /* VFP control registers. */
20726 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
20727 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
20728 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
20729 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
20730 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
20731 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
20732 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
20734 /* Maverick DSP coprocessor registers. */
20735 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
20736 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
20738 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
20739 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
20740 REGDEF(dspsc
,0,DSPSC
),
20742 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
20743 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
20744 REGDEF(DSPSC
,0,DSPSC
),
20746 /* iWMMXt data registers - p0, c0-15. */
20747 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
20749 /* iWMMXt control registers - p1, c0-3. */
20750 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
20751 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
20752 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
20753 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
20755 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
20756 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
20757 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
20758 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
20759 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
20761 /* XScale accumulator registers. */
20762 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
20768 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
20769 within psr_required_here. */
20770 static const struct asm_psr psrs
[] =
20772 /* Backward compatibility notation. Note that "all" is no longer
20773 truly all possible PSR bits. */
20774 {"all", PSR_c
| PSR_f
},
20778 /* Individual flags. */
20784 /* Combinations of flags. */
20785 {"fs", PSR_f
| PSR_s
},
20786 {"fx", PSR_f
| PSR_x
},
20787 {"fc", PSR_f
| PSR_c
},
20788 {"sf", PSR_s
| PSR_f
},
20789 {"sx", PSR_s
| PSR_x
},
20790 {"sc", PSR_s
| PSR_c
},
20791 {"xf", PSR_x
| PSR_f
},
20792 {"xs", PSR_x
| PSR_s
},
20793 {"xc", PSR_x
| PSR_c
},
20794 {"cf", PSR_c
| PSR_f
},
20795 {"cs", PSR_c
| PSR_s
},
20796 {"cx", PSR_c
| PSR_x
},
20797 {"fsx", PSR_f
| PSR_s
| PSR_x
},
20798 {"fsc", PSR_f
| PSR_s
| PSR_c
},
20799 {"fxs", PSR_f
| PSR_x
| PSR_s
},
20800 {"fxc", PSR_f
| PSR_x
| PSR_c
},
20801 {"fcs", PSR_f
| PSR_c
| PSR_s
},
20802 {"fcx", PSR_f
| PSR_c
| PSR_x
},
20803 {"sfx", PSR_s
| PSR_f
| PSR_x
},
20804 {"sfc", PSR_s
| PSR_f
| PSR_c
},
20805 {"sxf", PSR_s
| PSR_x
| PSR_f
},
20806 {"sxc", PSR_s
| PSR_x
| PSR_c
},
20807 {"scf", PSR_s
| PSR_c
| PSR_f
},
20808 {"scx", PSR_s
| PSR_c
| PSR_x
},
20809 {"xfs", PSR_x
| PSR_f
| PSR_s
},
20810 {"xfc", PSR_x
| PSR_f
| PSR_c
},
20811 {"xsf", PSR_x
| PSR_s
| PSR_f
},
20812 {"xsc", PSR_x
| PSR_s
| PSR_c
},
20813 {"xcf", PSR_x
| PSR_c
| PSR_f
},
20814 {"xcs", PSR_x
| PSR_c
| PSR_s
},
20815 {"cfs", PSR_c
| PSR_f
| PSR_s
},
20816 {"cfx", PSR_c
| PSR_f
| PSR_x
},
20817 {"csf", PSR_c
| PSR_s
| PSR_f
},
20818 {"csx", PSR_c
| PSR_s
| PSR_x
},
20819 {"cxf", PSR_c
| PSR_x
| PSR_f
},
20820 {"cxs", PSR_c
| PSR_x
| PSR_s
},
20821 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
20822 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
20823 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
20824 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
20825 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
20826 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
20827 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
20828 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
20829 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
20830 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
20831 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
20832 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
20833 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
20834 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
20835 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
20836 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
20837 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
20838 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
20839 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
20840 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
20841 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
20842 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
20843 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
20844 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
20847 /* Table of V7M psr names. */
20848 static const struct asm_psr v7m_psrs
[] =
20850 {"apsr", 0x0 }, {"APSR", 0x0 },
20851 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
20852 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
20853 {"psr", 0x3 }, {"PSR", 0x3 },
20854 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
20855 {"ipsr", 0x5 }, {"IPSR", 0x5 },
20856 {"epsr", 0x6 }, {"EPSR", 0x6 },
20857 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
20858 {"msp", 0x8 }, {"MSP", 0x8 },
20859 {"psp", 0x9 }, {"PSP", 0x9 },
20860 {"msplim", 0xa }, {"MSPLIM", 0xa },
20861 {"psplim", 0xb }, {"PSPLIM", 0xb },
20862 {"primask", 0x10}, {"PRIMASK", 0x10},
20863 {"basepri", 0x11}, {"BASEPRI", 0x11},
20864 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
20865 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
20866 {"control", 0x14}, {"CONTROL", 0x14},
20867 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
20868 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
20869 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
20870 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
20871 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
20872 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
20873 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
20874 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
20875 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
20878 /* Table of all shift-in-operand names. */
20879 static const struct asm_shift_name shift_names
[] =
20881 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
20882 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
20883 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
20884 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
20885 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
20886 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
},
20887 { "uxtw", SHIFT_UXTW
}, { "UXTW", SHIFT_UXTW
}
20890 /* Table of all explicit relocation names. */
20892 static struct reloc_entry reloc_names
[] =
20894 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
20895 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
20896 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
20897 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
20898 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
20899 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
20900 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
20901 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
20902 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
20903 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
20904 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
20905 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
20906 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
20907 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
20908 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
20909 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
20910 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
20911 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
20912 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
20913 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
20914 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
20915 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
20916 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
20917 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
20918 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
20919 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
20920 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
20924 /* Table of all conditional affixes. */
20925 static const struct asm_cond conds
[] =
20929 {"cs", 0x2}, {"hs", 0x2},
20930 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
20943 static const struct asm_cond vconds
[] =
20949 #define UL_BARRIER(L,U,CODE,FEAT) \
20950 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
20951 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
20953 static struct asm_barrier_opt barrier_opt_names
[] =
20955 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
20956 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
20957 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
20958 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
20959 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
20960 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
20961 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
20962 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
20963 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
20964 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
20965 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
20966 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
20967 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
20968 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
20969 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
20970 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
  TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
  TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
  TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case).  */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand one mnemonic into its bare form plus every conditional-infix
   variant (eq, ne, ..., al).  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Neon data-processing.  ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)			\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)			\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,			\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* An MVE mnemonic (M_MNEM_xyz enumerator), always predicable.  */
#define mCEF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
21207 static const struct asm_opcode insns
[] =
21209 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
21210 #define THUMB_VARIANT & arm_ext_v4t
21211 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21212 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21213 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21214 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21215 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
21216 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
21217 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
21218 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
21219 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21220 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21221 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21222 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21223 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21224 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21225 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21226 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21228 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
21229 for setting PSR flag bits. They are obsolete in V6 and do not
21230 have Thumb equivalents. */
21231 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21232 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21233 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
21234 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
21235 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
21236 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
21237 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21238 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21239 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
21241 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
21242 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
21243 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
21244 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
21246 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
21247 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
21248 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
21250 OP_ADDRGLDR
),ldst
, t_ldst
),
21251 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
21253 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21254 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21255 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21256 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21257 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21258 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21260 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
21261 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
21264 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
21265 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
21266 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
21267 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
21269 /* Thumb-compatibility pseudo ops. */
21270 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21271 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21272 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21273 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21274 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21275 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21276 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21277 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21278 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
21279 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
21280 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
21281 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
21283 /* These may simplify to neg. */
21284 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
21285 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
21287 #undef THUMB_VARIANT
21288 #define THUMB_VARIANT & arm_ext_os
21290 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
21291 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
21293 #undef THUMB_VARIANT
21294 #define THUMB_VARIANT & arm_ext_v6
21296 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
21298 /* V1 instructions with no Thumb analogue prior to V6T2. */
21299 #undef THUMB_VARIANT
21300 #define THUMB_VARIANT & arm_ext_v6t2
21302 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21303 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21304 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
21306 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21307 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21308 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
21309 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21311 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21312 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21314 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21315 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21317 /* V1 instructions with no Thumb analogue at all. */
21318 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
21319 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
21321 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
21322 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
21323 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
21324 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
21325 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
21326 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
21327 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
21328 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
21331 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
21332 #undef THUMB_VARIANT
21333 #define THUMB_VARIANT & arm_ext_v4t
21335 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
21336 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
21338 #undef THUMB_VARIANT
21339 #define THUMB_VARIANT & arm_ext_v6t2
21341 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21342 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
21344 /* Generic coprocessor instructions. */
21345 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21346 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21347 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21348 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21349 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21350 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21351 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21354 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
21356 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
21357 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
21360 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
21361 #undef THUMB_VARIANT
21362 #define THUMB_VARIANT & arm_ext_msr
21364 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
21365 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
21368 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
21369 #undef THUMB_VARIANT
21370 #define THUMB_VARIANT & arm_ext_v6t2
21372 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21373 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21374 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21375 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21376 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21377 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21378 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21379 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21382 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
21383 #undef THUMB_VARIANT
21384 #define THUMB_VARIANT & arm_ext_v4t
21386 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21387 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21388 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21389 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21390 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21391 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21394 #define ARM_VARIANT & arm_ext_v4t_5
21396 /* ARM Architecture 4T. */
21397 /* Note: bx (and blx) are required on V5, even if the processor does
21398 not support Thumb. */
21399 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
21402 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21403 #undef THUMB_VARIANT
21404 #define THUMB_VARIANT & arm_ext_v5t
21406 /* Note: blx has 2 variants; the .value coded here is for
21407 BLX(2). Only this variant has conditional execution. */
21408 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
21409 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
21411 #undef THUMB_VARIANT
21412 #define THUMB_VARIANT & arm_ext_v6t2
21414 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
21415 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21416 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21417 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21418 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21419 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21420 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21421 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21424 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21425 #undef THUMB_VARIANT
21426 #define THUMB_VARIANT & arm_ext_v5exp
21428 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21429 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21430 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21431 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21433 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21434 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21436 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21437 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21438 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21439 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21441 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21442 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21443 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21444 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21446 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21447 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21449 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21450 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21451 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21452 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21455 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21456 #undef THUMB_VARIANT
21457 #define THUMB_VARIANT & arm_ext_v6t2
21459 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
21460 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
21462 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
21463 ADDRGLDRS
), ldrd
, t_ldstd
),
21465 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21466 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21469 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21471 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
21474 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21475 #undef THUMB_VARIANT
21476 #define THUMB_VARIANT & arm_ext_v6
21478 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21479 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21480 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21481 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21482 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21483 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21484 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21485 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21486 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21487 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
21489 #undef THUMB_VARIANT
21490 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21492 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
21493 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21495 #undef THUMB_VARIANT
21496 #define THUMB_VARIANT & arm_ext_v6t2
21498 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21499 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21501 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
21502 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
21504 /* ARM V6 not included in V7M. */
21505 #undef THUMB_VARIANT
21506 #define THUMB_VARIANT & arm_ext_v6_notm
21507 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21508 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21509 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
21510 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
21511 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21512 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21513 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
21514 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21515 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
21516 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21517 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21518 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21519 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21520 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21521 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
21522 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
21523 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21524 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21525 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
21527 /* ARM V6 not included in V7M (eg. integer SIMD). */
21528 #undef THUMB_VARIANT
21529 #define THUMB_VARIANT & arm_ext_v6_dsp
21530 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
21531 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
21532 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21533 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21534 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21535 /* Old name for QASX. */
21536 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21537 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21538 /* Old name for QSAX. */
21539 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21540 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21541 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21542 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21543 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21544 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21545 /* Old name for SASX. */
21546 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21547 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21548 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21549 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21550 /* Old name for SHASX. */
21551 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21552 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21553 /* Old name for SHSAX. */
21554 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21555 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21556 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21557 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21558 /* Old name for SSAX. */
21559 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21560 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21561 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21562 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21563 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21564 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21565 /* Old name for UASX. */
21566 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21567 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21568 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21569 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21570 /* Old name for UHASX. */
21571 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21572 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21573 /* Old name for UHSAX. */
21574 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21575 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21576 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21577 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21578 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21579 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21580 /* Old name for UQASX. */
21581 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21582 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21583 /* Old name for UQSAX. */
21584 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21585 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21586 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21587 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21588 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21589 /* Old name for USAX. */
21590 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21591 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21592 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21593 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21594 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21595 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21596 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21597 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21598 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21599 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21600 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21601 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21602 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21603 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21604 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21605 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21606 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21607 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21608 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21609 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21610 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21611 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21612 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21613 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21614 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21615 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21616 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21617 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21618 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21619 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
21620 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
21621 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21622 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21623 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
21626 #define ARM_VARIANT & arm_ext_v6k_v6t2
21627 #undef THUMB_VARIANT
21628 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21630 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
21631 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
21632 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
21633 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
21635 #undef THUMB_VARIANT
21636 #define THUMB_VARIANT & arm_ext_v6_notm
21637 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
21639 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
21640 RRnpcb
), strexd
, t_strexd
),
21642 #undef THUMB_VARIANT
21643 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21644 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
21646 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
21648 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21650 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21652 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
21655 #define ARM_VARIANT & arm_ext_sec
21656 #undef THUMB_VARIANT
21657 #define THUMB_VARIANT & arm_ext_sec
21659 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
21662 #define ARM_VARIANT & arm_ext_virt
21663 #undef THUMB_VARIANT
21664 #define THUMB_VARIANT & arm_ext_virt
21666 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
21667 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
21670 #define ARM_VARIANT & arm_ext_pan
21671 #undef THUMB_VARIANT
21672 #define THUMB_VARIANT & arm_ext_pan
21674 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
21677 #define ARM_VARIANT & arm_ext_v6t2
21678 #undef THUMB_VARIANT
21679 #define THUMB_VARIANT & arm_ext_v6t2
21681 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
21682 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
21683 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21684 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21686 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21687 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
21689 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21690 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21691 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21692 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21695 #define ARM_VARIANT & arm_ext_v3
21696 #undef THUMB_VARIANT
21697 #define THUMB_VARIANT & arm_ext_v6t2
21699 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
21700 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
21701 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
21704 #define ARM_VARIANT & arm_ext_v6t2
21705 #undef THUMB_VARIANT
21706 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21707 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21708 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21710 /* Thumb-only instructions. */
21712 #define ARM_VARIANT NULL
21713 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
21714 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
21716 /* ARM does not really have an IT instruction, so always allow it.
21717 The opcode is copied from Thumb in order to allow warnings in
21718 -mimplicit-it=[never | arm] modes. */
21720 #define ARM_VARIANT & arm_ext_v1
21721 #undef THUMB_VARIANT
21722 #define THUMB_VARIANT & arm_ext_v6t2
21724 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
21725 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
21726 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
21727 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
21728 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
21729 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
21730 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
21731 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
21732 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
21733 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
21734 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
21735 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
21736 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
21737 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
21738 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
21739 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21740 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21741 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21743 /* Thumb2 only instructions. */
21745 #define ARM_VARIANT NULL
21747 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21748 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21749 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21750 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21751 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
21752 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
21754 /* Hardware division instructions. */
21756 #define ARM_VARIANT & arm_ext_adiv
21757 #undef THUMB_VARIANT
21758 #define THUMB_VARIANT & arm_ext_div
21760 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21761 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21763 /* ARM V6M/V7 instructions. */
21765 #define ARM_VARIANT & arm_ext_barrier
21766 #undef THUMB_VARIANT
21767 #define THUMB_VARIANT & arm_ext_barrier
21769 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
21770 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
21771 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
21773 /* ARM V7 instructions. */
21775 #define ARM_VARIANT & arm_ext_v7
21776 #undef THUMB_VARIANT
21777 #define THUMB_VARIANT & arm_ext_v7
21779 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
21780 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
21783 #define ARM_VARIANT & arm_ext_mp
21784 #undef THUMB_VARIANT
21785 #define THUMB_VARIANT & arm_ext_mp
21787 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
21789 /* AArchv8 instructions. */
21791 #define ARM_VARIANT & arm_ext_v8
21793 /* Instructions shared between armv8-a and armv8-m. */
21794 #undef THUMB_VARIANT
21795 #define THUMB_VARIANT & arm_ext_atomics
21797 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21798 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21799 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21800 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21801 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21802 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21803 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21804 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
21805 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21806 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21808 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21810 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21812 #undef THUMB_VARIANT
21813 #define THUMB_VARIANT & arm_ext_v8
21815 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
21816 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
21818 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
21821 /* Defined in V8 but is in undefined encoding space for earlier
21822 architectures. However earlier architectures are required to treat
21823 this instuction as a semihosting trap as well. Hence while not explicitly
21824 defined as such, it is in fact correct to define the instruction for all
21826 #undef THUMB_VARIANT
21827 #define THUMB_VARIANT & arm_ext_v1
21829 #define ARM_VARIANT & arm_ext_v1
21830 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
21832 /* ARMv8 T32 only. */
21834 #define ARM_VARIANT NULL
21835 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
21836 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
21837 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
21839 /* FP for ARMv8. */
21841 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21842 #undef THUMB_VARIANT
21843 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21845 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21846 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21847 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21848 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21849 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21850 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21851 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
21852 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
21853 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
21854 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
21855 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
21856 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
21857 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
21858 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
21859 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
21860 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
21861 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
21863 /* Crypto v1 extensions. */
21865 #define ARM_VARIANT & fpu_crypto_ext_armv8
21866 #undef THUMB_VARIANT
21867 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21869 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
21870 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
21871 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
21872 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
21873 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
21874 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
21875 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
21876 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
21877 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
21878 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
21879 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
21880 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
21881 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
21882 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
21885 #define ARM_VARIANT & crc_ext_armv8
21886 #undef THUMB_VARIANT
21887 #define THUMB_VARIANT & crc_ext_armv8
21888 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
21889 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
21890 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
21891 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
21892 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
21893 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
21895 /* ARMv8.2 RAS extension. */
21897 #define ARM_VARIANT & arm_ext_ras
21898 #undef THUMB_VARIANT
21899 #define THUMB_VARIANT & arm_ext_ras
21900 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
21903 #define ARM_VARIANT & arm_ext_v8_3
21904 #undef THUMB_VARIANT
21905 #define THUMB_VARIANT & arm_ext_v8_3
21906 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
21907 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
21908 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
21911 #define ARM_VARIANT & fpu_neon_ext_dotprod
21912 #undef THUMB_VARIANT
21913 #define THUMB_VARIANT & fpu_neon_ext_dotprod
21914 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
21915 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
21918 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
21919 #undef THUMB_VARIANT
21920 #define THUMB_VARIANT NULL
21922 cCE("wfs", e200110
, 1, (RR
), rd
),
21923 cCE("rfs", e300110
, 1, (RR
), rd
),
21924 cCE("wfc", e400110
, 1, (RR
), rd
),
21925 cCE("rfc", e500110
, 1, (RR
), rd
),
21927 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21928 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21929 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21930 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21932 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21933 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21934 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21935 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
21937 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
21938 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
21939 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
21940 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
21941 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
21942 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
21943 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
21944 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
21945 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
21946 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
21947 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
21948 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
21950 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
21951 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
21952 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
21953 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
21954 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
21955 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
21956 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
21957 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
21958 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
21959 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
21960 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
21961 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
21963 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
21964 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
21965 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
21966 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
21967 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
21968 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
21969 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
21970 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
21971 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
21972 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
21973 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
21974 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
21976 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
21977 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
21978 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
21979 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
21980 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
21981 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
21982 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
21983 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
21984 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
21985 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
21986 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
21987 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
21989 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
21990 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
21991 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
21992 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
21993 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
21994 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
21995 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
21996 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
21997 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
21998 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
21999 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
22000 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
22002 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
22003 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
22004 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
22005 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
22006 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
22007 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
22008 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
22009 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
22010 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
22011 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
22012 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
22013 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
22015 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
22016 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
22017 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
22018 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
22019 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
22020 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
22021 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
22022 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
22023 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
22024 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
22025 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
22026 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
22028 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
22029 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
22030 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
22031 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
22032 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
22033 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
22034 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
22035 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
22036 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
22037 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
22038 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
22039 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
22041 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
22042 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
22043 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
22044 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
22045 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
22046 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
22047 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
22048 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
22049 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
22050 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
22051 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
22052 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
22054 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
22055 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
22056 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
22057 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
22058 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
22059 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
22060 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
22061 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
22062 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
22063 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
22064 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
22065 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
22067 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
22068 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
22069 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
22070 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
22071 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
22072 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
22073 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
22074 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
22075 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
22076 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
22077 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
22078 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
22080 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
22081 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
22082 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
22083 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
22084 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
22085 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
22086 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
22087 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
22088 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
22089 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
22090 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
22091 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
22093 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
22094 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
22095 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
22096 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
22097 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
22098 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
22099 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
22100 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
22101 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
22102 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
22103 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
22104 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
22106 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
22107 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
22108 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
22109 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
22110 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
22111 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
22112 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
22113 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
22114 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
22115 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
22116 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
22117 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
22119 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
22120 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
22121 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
22122 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
22123 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
22124 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
22125 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
22126 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
22127 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
22128 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
22129 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
22130 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
22132 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
22133 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
22134 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
22135 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
22136 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
22137 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
22138 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
22139 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
22140 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
22141 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
22142 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
22143 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
22145 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22146 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22147 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22148 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22149 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22150 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22151 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22152 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22153 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22154 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22155 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22156 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22158 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22159 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22160 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22161 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22162 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22163 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22164 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22165 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22166 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22167 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22168 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22169 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22171 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22172 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22173 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22174 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22175 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22176 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22177 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22178 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22179 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22180 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22181 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22182 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22184 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22185 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22186 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22187 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22188 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22189 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22190 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22191 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22192 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22193 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22194 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22195 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22197 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22198 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22199 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22200 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22201 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22202 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22203 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22204 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22205 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22206 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22207 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22208 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22210 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22211 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22212 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22213 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22214 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22215 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22216 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22217 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22218 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22219 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22220 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22221 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22223 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22224 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22225 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22226 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22227 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22228 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22229 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22230 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22231 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22232 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22233 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22234 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22236 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22237 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22238 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22239 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22240 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22241 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22242 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22243 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22244 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22245 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22246 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22247 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22249 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22250 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22251 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22252 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22253 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22254 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22255 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22256 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22257 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22258 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22259 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22260 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22262 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22263 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22264 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22265 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22266 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22267 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22268 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22269 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22270 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22271 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22272 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22273 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22275 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22276 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22277 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22278 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22279 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22280 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22281 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22282 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22283 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22284 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22285 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22286 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22288 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22289 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22290 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22291 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22292 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22293 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22294 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22295 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22296 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22297 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22298 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22299 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22301 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22302 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22303 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22304 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22305 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22306 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22307 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22308 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22309 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22310 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22311 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22312 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22314 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22315 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22316 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22317 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22319 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
22320 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
22321 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
22322 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
22323 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
22324 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
22325 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
22326 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
22327 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
22328 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
22329 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
22330 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
22332 /* The implementation of the FIX instruction is broken on some
22333 assemblers, in that it accepts a precision specifier as well as a
22334 rounding specifier, despite the fact that this is meaningless.
22335 To be more compatible, we accept it as well, though of course it
22336 does not set any bits. */
22337 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
22338 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
22339 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
22340 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
22341 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
22342 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
22343 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
22344 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
22345 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
22346 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
22347 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
22348 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
22349 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
22351 /* Instructions that were new with the real FPA, call them V2. */
22353 #define ARM_VARIANT & fpu_fpa_ext_v2
22355 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22356 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22357 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22358 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22359 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22360 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22363 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
22365 /* Moves and type conversions. */
22366 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22367 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
22368 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
22369 cCE("fmstat", ef1fa10
, 0, (), noargs
),
22370 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
22371 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
22372 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22373 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22374 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22375 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22376 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22377 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22378 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
22379 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
22381 /* Memory operations. */
22382 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
22383 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
22384 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22385 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22386 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22387 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22388 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22389 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22390 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22391 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22392 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22393 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22394 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22395 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22396 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22397 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22398 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22399 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22401 /* Monadic operations. */
22402 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22403 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22404 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22406 /* Dyadic operations. */
22407 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22408 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22409 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22410 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22411 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22412 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22413 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22414 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22415 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22418 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22419 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
22420 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22421 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
22423 /* Double precision load/store are still present on single precision
22424 implementations. */
22425 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22426 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22427 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22428 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22429 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22430 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22431 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22432 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22433 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22434 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22437 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22439 /* Moves and type conversions. */
22440 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22441 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22442 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22443 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22444 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22445 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22446 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22447 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22448 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22449 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22450 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22451 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22452 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22454 /* Monadic operations. */
22455 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22456 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22457 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22459 /* Dyadic operations. */
22460 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22461 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22462 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22463 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22464 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22465 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22466 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22467 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22468 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22471 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22472 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
22473 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22474 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
22477 #define ARM_VARIANT & fpu_vfp_ext_v2
22479 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
22480 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
22481 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
22482 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
22484 /* Instructions which may belong to either the Neon or VFP instruction sets.
22485 Individual encoder functions perform additional architecture checks. */
22487 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22488 #undef THUMB_VARIANT
22489 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22491 /* These mnemonics are unique to VFP. */
22492 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
22493 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
22494 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22495 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22496 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22497 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22498 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22499 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
22500 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
22501 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
22503 /* Mnemonics shared by Neon and VFP. */
22504 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
22505 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22506 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22508 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22509 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22510 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22511 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22512 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22513 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22515 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
22516 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
22517 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
22518 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
22521 /* NOTE: All VMOV encoding is special-cased! */
22522 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
22523 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
22525 #undef THUMB_VARIANT
22526 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22527 by different feature bits. Since we are setting the Thumb guard, we can
22528 require Thumb-1 which makes it a nop guard and set the right feature bit in
22529 do_vldr_vstr (). */
22530 #define THUMB_VARIANT & arm_ext_v4t
22531 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22532 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22535 #define ARM_VARIANT & arm_ext_fp16
22536 #undef THUMB_VARIANT
22537 #define THUMB_VARIANT & arm_ext_fp16
22538 /* New instructions added from v8.2, allowing the extraction and insertion of
22539 the upper 16 bits of a 32-bit vector register. */
22540 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
22541 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
22543 /* New backported fma/fms instructions optional in v8.2. */
22544 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
22545 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
22547 #undef THUMB_VARIANT
22548 #define THUMB_VARIANT & fpu_neon_ext_v1
22550 #define ARM_VARIANT & fpu_neon_ext_v1
22552 /* Data processing with three registers of the same length. */
22553 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22554 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
22555 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
22556 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22557 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22558 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22559 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22560 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22561 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22562 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22563 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22564 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22565 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22566 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22567 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22568 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22569 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22570 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22571 /* If not immediate, fall back to neon_dyadic_i64_su.
22572 shl_imm should accept I8 I16 I32 I64,
22573 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22574 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
22575 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
22576 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
22577 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
22578 /* Logic ops, types optional & ignored. */
22579 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22580 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22581 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22582 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22583 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22584 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22585 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22586 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22587 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
22588 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
22589 /* Bitfield ops, untyped. */
22590 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22591 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22592 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22593 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22594 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22595 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22596 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22597 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22598 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22599 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22600 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22601 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22602 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22603 back to neon_dyadic_if_su. */
22604 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22605 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22606 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22607 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22608 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22609 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22610 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22611 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22612 /* Comparison. Type I8 I16 I32 F32. */
22613 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
22614 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
22615 /* As above, D registers only. */
22616 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22617 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22618 /* Int and float variants, signedness unimportant. */
22619 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22620 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22621 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
22622 /* Add/sub take types I8 I16 I32 I64 F32. */
22623 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22624 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22625 /* vtst takes sizes 8, 16, 32. */
22626 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
22627 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
22628 /* VMUL takes I8 I16 I32 F32 P8. */
22629 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
22630 /* VQD{R}MULH takes S16 S32. */
22631 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22632 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22633 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22634 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22635 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22636 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22637 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22638 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22639 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22640 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22641 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22642 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22643 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22644 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22645 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22646 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22647 /* ARM v8.1 extension. */
22648 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22649 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22650 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22651 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22653 /* Two address, int/float. Types S8 S16 S32 F32. */
22654 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22655 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22657 /* Data processing with two registers and a shift amount. */
22658 /* Right shifts, and variants with rounding.
22659 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22660 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22661 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22662 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22663 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22664 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22665 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22666 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22667 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22668 /* Shift and insert. Sizes accepted 8 16 32 64. */
22669 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
22670 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
22671 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
22672 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
22673 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22674 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
22675 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
22676 /* Right shift immediate, saturating & narrowing, with rounding variants.
22677 Types accepted S16 S32 S64 U16 U32 U64. */
22678 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22679 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22680 /* As above, unsigned. Types accepted S16 S32 S64. */
22681 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22682 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22683 /* Right shift narrowing. Types accepted I16 I32 I64. */
22684 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22685 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22686 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22687 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
22688 /* CVT with optional immediate for fixed-point variant. */
22689 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
22691 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
22692 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
22694 /* Data processing, three registers of different lengths. */
22695 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22696 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
22697 /* If not scalar, fall back to neon_dyadic_long.
22698 Vector types as above, scalar types S16 S32 U16 U32. */
22699 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22700 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22701 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22702 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22703 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22704 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22705 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22706 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22707 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22708 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22709 /* Saturating doubling multiplies. Types S16 S32. */
22710 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22711 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22712 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22713 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22714 S16 S32 U16 U32. */
22715 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
22717 /* Extract. Size 8. */
22718 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
22719 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
22721 /* Two registers, miscellaneous. */
22722 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22723 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
22724 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
22725 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
22726 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
22727 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
22728 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
22729 /* Vector replicate. Sizes 8 16 32. */
22730 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
22731 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
22732 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22733 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
22734 /* VMOVN. Types I16 I32 I64. */
22735 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
22736 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22737 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
22738 /* VQMOVUN. Types S16 S32 S64. */
22739 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
22740 /* VZIP / VUZP. Sizes 8 16 32. */
22741 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22742 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22743 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22744 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22745 /* VQABS / VQNEG. Types S8 S16 S32. */
22746 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22747 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22748 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22749 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22750 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22751 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22752 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
22753 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22754 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
22755 /* Reciprocal estimates. Types U32 F16 F32. */
22756 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22757 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
22758 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22759 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
22760 /* VCLS. Types S8 S16 S32. */
22761 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
22762 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
22763 /* VCLZ. Types I8 I16 I32. */
22764 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
22765 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
22766 /* VCNT. Size 8. */
22767 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
22768 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
22769 /* Two address, untyped. */
22770 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
22771 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
22772 /* VTRN. Sizes 8 16 32. */
22773 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
22774 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
22776 /* Table lookup. Size 8. */
22777 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22778 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22780 #undef THUMB_VARIANT
22781 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22783 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22785 /* Neon element/structure load/store. */
22786 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22787 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22788 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22789 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22790 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22791 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22792 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22793 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22795 #undef THUMB_VARIANT
22796 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22798 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22799 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
22800 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22801 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22802 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22803 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22804 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22805 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22806 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22807 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22809 #undef THUMB_VARIANT
22810 #define THUMB_VARIANT & fpu_vfp_ext_v3
22812 #define ARM_VARIANT & fpu_vfp_ext_v3
22814 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
22815 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22816 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22817 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22818 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22819 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22820 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22821 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22822 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22825 #define ARM_VARIANT & fpu_vfp_ext_fma
22826 #undef THUMB_VARIANT
22827 #define THUMB_VARIANT & fpu_vfp_ext_fma
22828 /* Mnemonics shared by Neon and VFP. These are included in the
22829 VFP FMA variant; NEON and VFP FMA always includes the NEON
22830 FMA instructions. */
22831 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22832 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22833 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22834 the v form should always be used. */
22835 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22836 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22837 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22838 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22839 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22840 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22842 #undef THUMB_VARIANT
22844 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22846 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22847 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22848 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22849 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22850 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22851 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22852 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
22853 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
22856 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22858 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
22859 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
22860 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
22861 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
22862 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
22863 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
22864 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
22865 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
22866 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
22867 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22868 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22869 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22870 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22871 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22872 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22873 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22874 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22875 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22876 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
22877 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
22878 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22879 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22880 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22881 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22882 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22883 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22884 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
22885 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
22886 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
22887 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
22888 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
22889 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
22890 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
22891 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
22892 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22893 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22894 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
22895 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22896 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22897 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22898 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22899 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22900 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22901 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22902 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22903 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22904 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
22905 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22906 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22907 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22908 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22909 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22910 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22911 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22912 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22913 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22914 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22915 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22916 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22917 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22918 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22919 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22920 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22921 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22922 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22923 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22924 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22925 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22926 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22927 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22928 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22929 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22930 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22931 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22932 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22933 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22934 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22935 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22936 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22937 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22938 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22939 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22940 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22941 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22942 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22943 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22944 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22945 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22946 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
22947 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22948 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22949 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22950 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22951 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22952 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22953 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22954 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22955 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22956 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22957 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22958 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22959 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22960 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22961 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22962 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22963 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22964 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22965 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22966 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22967 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22968 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
22969 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22970 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22971 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22972 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22973 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22974 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22975 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22976 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22977 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22978 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22979 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22980 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22981 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22982 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22983 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22984 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22985 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
22986 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
22987 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22988 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
22989 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
22990 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
22991 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22992 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22993 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22994 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22995 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22996 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22997 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22998 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
22999 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23000 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23001 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23002 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23003 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23004 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23005 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23006 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23007 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23008 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23009 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23010 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23011 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23012 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23013 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23014 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23015 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23016 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23017 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23018 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23019 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
23022 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
23024 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
23025 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
23026 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
23027 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23028 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23029 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23030 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23031 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23032 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23033 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23034 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23035 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23036 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23037 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23038 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23039 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23040 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23041 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23042 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23043 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23044 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
23045 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23046 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23047 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23048 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23049 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23050 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23051 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23052 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23053 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23054 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23055 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23056 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23057 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23058 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23059 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23060 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23061 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23062 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23063 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23064 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23065 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23066 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23067 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23068 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23069 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23070 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23071 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23072 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23073 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23074 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23075 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23076 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23077 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23078 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23079 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23080 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23083 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
23085 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
23086 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
23087 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
23088 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
23089 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
23090 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
23091 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
23092 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
23093 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
23094 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
23095 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
23096 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
23097 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
23098 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
23099 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
23100 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
23101 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
23102 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
23103 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
23104 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
23105 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
23106 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
23107 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
23108 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
23109 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
23110 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
23111 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
23112 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
23113 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
23114 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
23115 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
23116 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
23117 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
23118 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
23119 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
23120 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
23121 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
23122 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
23123 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
23124 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
23125 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
23126 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
23127 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
23128 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
23129 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
23130 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
23131 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
23132 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
23133 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
23134 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
23135 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
23136 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
23137 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
23138 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
23139 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
23140 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
23141 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
23142 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
23143 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
23144 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
23145 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
23146 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
23147 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
23148 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
23149 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23150 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
23151 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23152 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
23153 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23154 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
23155 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23156 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23157 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
23158 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
23159 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
23160 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
23162 /* ARMv8.5-A instructions. */
23164 #define ARM_VARIANT & arm_ext_sb
23165 #undef THUMB_VARIANT
23166 #define THUMB_VARIANT & arm_ext_sb
23167 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
23170 #define ARM_VARIANT & arm_ext_predres
23171 #undef THUMB_VARIANT
23172 #define THUMB_VARIANT & arm_ext_predres
23173 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
23174 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
23175 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
23177 /* ARMv8-M instructions. */
23179 #define ARM_VARIANT NULL
23180 #undef THUMB_VARIANT
23181 #define THUMB_VARIANT & arm_ext_v8m
23182 ToU("sg", e97fe97f
, 0, (), noargs
),
23183 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
23184 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
23185 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
23186 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
23187 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
23188 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
23190 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
23191 instructions behave as nop if no VFP is present. */
23192 #undef THUMB_VARIANT
23193 #define THUMB_VARIANT & arm_ext_v8m_main
23194 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
23195 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
23197 /* Armv8.1-M Mainline instructions. */
23198 #undef THUMB_VARIANT
23199 #define THUMB_VARIANT & arm_ext_v8_1m_main
23200 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
23201 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
23202 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
23203 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
23204 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
23206 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
23207 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
23208 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
23210 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
23211 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
),
23213 #undef THUMB_VARIANT
23214 #define THUMB_VARIANT & mve_ext
23215 ToC("vpst", fe710f4d
, 0, (), mve_vpt
),
23216 ToC("vpstt", fe318f4d
, 0, (), mve_vpt
),
23217 ToC("vpste", fe718f4d
, 0, (), mve_vpt
),
23218 ToC("vpsttt", fe314f4d
, 0, (), mve_vpt
),
23219 ToC("vpstte", fe31cf4d
, 0, (), mve_vpt
),
23220 ToC("vpstet", fe71cf4d
, 0, (), mve_vpt
),
23221 ToC("vpstee", fe714f4d
, 0, (), mve_vpt
),
23222 ToC("vpstttt", fe312f4d
, 0, (), mve_vpt
),
23223 ToC("vpsttte", fe316f4d
, 0, (), mve_vpt
),
23224 ToC("vpsttet", fe31ef4d
, 0, (), mve_vpt
),
23225 ToC("vpsttee", fe31af4d
, 0, (), mve_vpt
),
23226 ToC("vpstett", fe71af4d
, 0, (), mve_vpt
),
23227 ToC("vpstete", fe71ef4d
, 0, (), mve_vpt
),
23228 ToC("vpsteet", fe716f4d
, 0, (), mve_vpt
),
23229 ToC("vpsteee", fe712f4d
, 0, (), mve_vpt
),
23231 /* MVE and MVE FP only. */
23232 mCEF(vmullb
, _vmullb
, 3, (RMQ
, RMQ
, RMQ
), mve_vmull
),
23233 mCEF(vabav
, _vabav
, 3, (RRnpcsp
, RMQ
, RMQ
), mve_vabav
),
23234 mCEF(vmladav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23235 mCEF(vmladava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23236 mCEF(vmladavx
, _vmladavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23237 mCEF(vmladavax
, _vmladavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23238 mCEF(vmlav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23239 mCEF(vmlava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23240 mCEF(vmlsdav
, _vmlsdav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23241 mCEF(vmlsdava
, _vmlsdava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23242 mCEF(vmlsdavx
, _vmlsdavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23243 mCEF(vmlsdavax
, _vmlsdavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23245 mCEF(vst20
, _vst20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23246 mCEF(vst21
, _vst21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23247 mCEF(vst40
, _vst40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23248 mCEF(vst41
, _vst41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23249 mCEF(vst42
, _vst42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23250 mCEF(vst43
, _vst43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23251 mCEF(vld20
, _vld20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23252 mCEF(vld21
, _vld21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23253 mCEF(vld40
, _vld40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23254 mCEF(vld41
, _vld41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23255 mCEF(vld42
, _vld42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23256 mCEF(vld43
, _vld43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23257 mCEF(vstrb
, _vstrb
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23258 mCEF(vstrh
, _vstrh
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23259 mCEF(vstrw
, _vstrw
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23260 mCEF(vstrd
, _vstrd
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23261 mCEF(vldrb
, _vldrb
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23262 mCEF(vldrh
, _vldrh
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23263 mCEF(vldrw
, _vldrw
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23264 mCEF(vldrd
, _vldrd
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23267 #define ARM_VARIANT & fpu_vfp_ext_v1xd
23268 #undef THUMB_VARIANT
23269 #define THUMB_VARIANT & arm_ext_v6t2
23271 mCEF(vmullt
, _vmullt
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ
), mve_vmull
),
23272 mnCEF(vadd
, _vadd
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
23273 mnCEF(vsub
, _vsub
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
23275 MNCEF(vabs
, 1b10300
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
23276 MNCEF(vneg
, 1b10380
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
23279 #define ARM_VARIANT & fpu_neon_ext_v1
23280 mnUF(vabd
, _vabd
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
23281 mnUF(vabdl
, _vabdl
, 3, (RNQMQ
, RNDMQ
, RNDMQ
), neon_dyadic_long
),
23282 mnUF(vaddl
, _vaddl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
23283 mnUF(vsubl
, _vsubl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
23286 #undef THUMB_VARIANT
23318 /* MD interface: bits in the object file. */
23320 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
23321 for use in the a.out file, and stores them in the array pointed to by buf.
23322 This knows about the endian-ness of the target machine and does
23323 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
23324 2 (short) and 4 (long) Floating numbers are put out as a series of
23325 LITTLENUMS (shorts, here at least). */
23328 md_number_to_chars (char * buf
, valueT val
, int n
)
23330 if (target_big_endian
)
23331 number_to_chars_bigendian (buf
, val
, n
);
23333 number_to_chars_littleendian (buf
, val
, n
);
23337 md_chars_to_number (char * buf
, int n
)
23340 unsigned char * where
= (unsigned char *) buf
;
23342 if (target_big_endian
)
23347 result
|= (*where
++ & 255);
23355 result
|= (where
[n
] & 255);
23362 /* MD interface: Sections. */
23364 /* Calculate the maximum variable size (i.e., excluding fr_fix)
23365 that an rs_machine_dependent frag may reach. */
23368 arm_frag_max_var (fragS
*fragp
)
23370 /* We only use rs_machine_dependent for variable-size Thumb instructions,
23371 which are either THUMB_SIZE (2) or INSN_SIZE (4).
23373 Note that we generate relaxable instructions even for cases that don't
23374 really need it, like an immediate that's a trivial constant. So we're
23375 overestimating the instruction size for some of those cases. Rather
23376 than putting more intelligence here, it would probably be better to
23377 avoid generating a relaxation frag in the first place when it can be
23378 determined up front that a short instruction will suffice. */
23380 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
23384 /* Estimate the size of a frag before relaxing. Assume everything fits in
23388 md_estimate_size_before_relax (fragS
* fragp
,
23389 segT segtype ATTRIBUTE_UNUSED
)
23395 /* Convert a machine dependent frag. */
23398 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
23400 unsigned long insn
;
23401 unsigned long old_op
;
23409 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23411 old_op
= bfd_get_16(abfd
, buf
);
23412 if (fragp
->fr_symbol
)
23414 exp
.X_op
= O_symbol
;
23415 exp
.X_add_symbol
= fragp
->fr_symbol
;
23419 exp
.X_op
= O_constant
;
23421 exp
.X_add_number
= fragp
->fr_offset
;
23422 opcode
= fragp
->fr_subtype
;
23425 case T_MNEM_ldr_pc
:
23426 case T_MNEM_ldr_pc2
:
23427 case T_MNEM_ldr_sp
:
23428 case T_MNEM_str_sp
:
23435 if (fragp
->fr_var
== 4)
23437 insn
= THUMB_OP32 (opcode
);
23438 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
23440 insn
|= (old_op
& 0x700) << 4;
23444 insn
|= (old_op
& 7) << 12;
23445 insn
|= (old_op
& 0x38) << 13;
23447 insn
|= 0x00000c00;
23448 put_thumb32_insn (buf
, insn
);
23449 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
23453 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
23455 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
23458 if (fragp
->fr_var
== 4)
23460 insn
= THUMB_OP32 (opcode
);
23461 insn
|= (old_op
& 0xf0) << 4;
23462 put_thumb32_insn (buf
, insn
);
23463 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
23467 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23468 exp
.X_add_number
-= 4;
23476 if (fragp
->fr_var
== 4)
23478 int r0off
= (opcode
== T_MNEM_mov
23479 || opcode
== T_MNEM_movs
) ? 0 : 8;
23480 insn
= THUMB_OP32 (opcode
);
23481 insn
= (insn
& 0xe1ffffff) | 0x10000000;
23482 insn
|= (old_op
& 0x700) << r0off
;
23483 put_thumb32_insn (buf
, insn
);
23484 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23488 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
23493 if (fragp
->fr_var
== 4)
23495 insn
= THUMB_OP32(opcode
);
23496 put_thumb32_insn (buf
, insn
);
23497 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
23500 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
23504 if (fragp
->fr_var
== 4)
23506 insn
= THUMB_OP32(opcode
);
23507 insn
|= (old_op
& 0xf00) << 14;
23508 put_thumb32_insn (buf
, insn
);
23509 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
23512 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
23515 case T_MNEM_add_sp
:
23516 case T_MNEM_add_pc
:
23517 case T_MNEM_inc_sp
:
23518 case T_MNEM_dec_sp
:
23519 if (fragp
->fr_var
== 4)
23521 /* ??? Choose between add and addw. */
23522 insn
= THUMB_OP32 (opcode
);
23523 insn
|= (old_op
& 0xf0) << 4;
23524 put_thumb32_insn (buf
, insn
);
23525 if (opcode
== T_MNEM_add_pc
)
23526 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
23528 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23531 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23539 if (fragp
->fr_var
== 4)
23541 insn
= THUMB_OP32 (opcode
);
23542 insn
|= (old_op
& 0xf0) << 4;
23543 insn
|= (old_op
& 0xf) << 16;
23544 put_thumb32_insn (buf
, insn
);
23545 if (insn
& (1 << 20))
23546 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23548 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23551 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23557 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
23558 (enum bfd_reloc_code_real
) reloc_type
);
23559 fixp
->fx_file
= fragp
->fr_file
;
23560 fixp
->fx_line
= fragp
->fr_line
;
23561 fragp
->fr_fix
+= fragp
->fr_var
;
23563 /* Set whether we use thumb-2 ISA based on final relaxation results. */
23564 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
23565 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
23566 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
23569 /* Return the size of a relaxable immediate operand instruction.
23570 SHIFT and SIZE specify the form of the allowable immediate. */
23572 relax_immediate (fragS
*fragp
, int size
, int shift
)
23578 /* ??? Should be able to do better than this. */
23579 if (fragp
->fr_symbol
)
23582 low
= (1 << shift
) - 1;
23583 mask
= (1 << (shift
+ size
)) - (1 << shift
);
23584 offset
= fragp
->fr_offset
;
23585 /* Force misaligned offsets to 32-bit variant. */
23588 if (offset
& ~mask
)
23593 /* Get the address of a symbol during relaxation. */
23595 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
23601 sym
= fragp
->fr_symbol
;
23602 sym_frag
= symbol_get_frag (sym
);
23603 know (S_GET_SEGMENT (sym
) != absolute_section
23604 || sym_frag
== &zero_address_frag
);
23605 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
23607 /* If frag has yet to be reached on this pass, assume it will
23608 move by STRETCH just as we did. If this is not so, it will
23609 be because some frag between grows, and that will force
23613 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
23617 /* Adjust stretch for any alignment frag. Note that if have
23618 been expanding the earlier code, the symbol may be
23619 defined in what appears to be an earlier frag. FIXME:
23620 This doesn't handle the fr_subtype field, which specifies
23621 a maximum number of bytes to skip when doing an
23623 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
23625 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
23628 stretch
= - ((- stretch
)
23629 & ~ ((1 << (int) f
->fr_offset
) - 1));
23631 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
23643 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23646 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
23651 /* Assume worst case for symbols not known to be in the same section. */
23652 if (fragp
->fr_symbol
== NULL
23653 || !S_IS_DEFINED (fragp
->fr_symbol
)
23654 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23655 || S_IS_WEAK (fragp
->fr_symbol
))
23658 val
= relaxed_symbol_addr (fragp
, stretch
);
23659 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
23660 addr
= (addr
+ 4) & ~3;
23661 /* Force misaligned targets to 32-bit variant. */
23665 if (val
< 0 || val
> 1020)
23670 /* Return the size of a relaxable add/sub immediate instruction. */
23672 relax_addsub (fragS
*fragp
, asection
*sec
)
23677 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23678 op
= bfd_get_16(sec
->owner
, buf
);
23679 if ((op
& 0xf) == ((op
>> 4) & 0xf))
23680 return relax_immediate (fragp
, 8, 0);
23682 return relax_immediate (fragp
, 3, 0);
23685 /* Return TRUE iff the definition of symbol S could be pre-empted
23686 (overridden) at link or load time. */
23688 symbol_preemptible (symbolS
*s
)
23690 /* Weak symbols can always be pre-empted. */
23694 /* Non-global symbols cannot be pre-empted. */
23695 if (! S_IS_EXTERNAL (s
))
23699 /* In ELF, a global symbol can be marked protected, or private. In that
23700 case it can't be pre-empted (other definitions in the same link unit
23701 would violate the ODR). */
23702 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
23706 /* Other global symbols might be pre-empted. */
23710 /* Return the size of a relaxable branch instruction. BITS is the
23711 size of the offset field in the narrow instruction. */
23714 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
23720 /* Assume worst case for symbols not known to be in the same section. */
23721 if (!S_IS_DEFINED (fragp
->fr_symbol
)
23722 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23723 || S_IS_WEAK (fragp
->fr_symbol
))
23727 /* A branch to a function in ARM state will require interworking. */
23728 if (S_IS_DEFINED (fragp
->fr_symbol
)
23729 && ARM_IS_FUNC (fragp
->fr_symbol
))
23733 if (symbol_preemptible (fragp
->fr_symbol
))
23736 val
= relaxed_symbol_addr (fragp
, stretch
);
23737 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
23740 /* Offset is a signed value *2 */
23742 if (val
>= limit
|| val
< -limit
)
23748 /* Relax a machine dependent frag. This returns the amount by which
23749 the current size of the frag should change. */
23752 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
23757 oldsize
= fragp
->fr_var
;
23758 switch (fragp
->fr_subtype
)
23760 case T_MNEM_ldr_pc2
:
23761 newsize
= relax_adr (fragp
, sec
, stretch
);
23763 case T_MNEM_ldr_pc
:
23764 case T_MNEM_ldr_sp
:
23765 case T_MNEM_str_sp
:
23766 newsize
= relax_immediate (fragp
, 8, 2);
23770 newsize
= relax_immediate (fragp
, 5, 2);
23774 newsize
= relax_immediate (fragp
, 5, 1);
23778 newsize
= relax_immediate (fragp
, 5, 0);
23781 newsize
= relax_adr (fragp
, sec
, stretch
);
23787 newsize
= relax_immediate (fragp
, 8, 0);
23790 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
23793 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
23795 case T_MNEM_add_sp
:
23796 case T_MNEM_add_pc
:
23797 newsize
= relax_immediate (fragp
, 8, 2);
23799 case T_MNEM_inc_sp
:
23800 case T_MNEM_dec_sp
:
23801 newsize
= relax_immediate (fragp
, 7, 2);
23807 newsize
= relax_addsub (fragp
, sec
);
23813 fragp
->fr_var
= newsize
;
23814 /* Freeze wide instructions that are at or before the same location as
23815 in the previous pass. This avoids infinite loops.
23816 Don't freeze them unconditionally because targets may be artificially
23817 misaligned by the expansion of preceding frags. */
23818 if (stretch
<= 0 && newsize
> 2)
23820 md_convert_frag (sec
->owner
, sec
, fragp
);
23824 return newsize
- oldsize
;
23827 /* Round up a section size to the appropriate boundary. */
23830 md_section_align (segT segment ATTRIBUTE_UNUSED
,
23836 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23837 of an rs_align_code fragment. */
23840 arm_handle_align (fragS
* fragP
)
23842 static unsigned char const arm_noop
[2][2][4] =
23845 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
23846 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
23849 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
23850 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
23853 static unsigned char const thumb_noop
[2][2][2] =
23856 {0xc0, 0x46}, /* LE */
23857 {0x46, 0xc0}, /* BE */
23860 {0x00, 0xbf}, /* LE */
23861 {0xbf, 0x00} /* BE */
23864 static unsigned char const wide_thumb_noop
[2][4] =
23865 { /* Wide Thumb-2 */
23866 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
23867 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
23870 unsigned bytes
, fix
, noop_size
;
23872 const unsigned char * noop
;
23873 const unsigned char *narrow_noop
= NULL
;
23878 if (fragP
->fr_type
!= rs_align_code
)
23881 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
23882 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
23885 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23886 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
23888 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
23890 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
23892 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23893 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
23895 narrow_noop
= thumb_noop
[1][target_big_endian
];
23896 noop
= wide_thumb_noop
[target_big_endian
];
23899 noop
= thumb_noop
[0][target_big_endian
];
23907 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
23908 ? selected_cpu
: arm_arch_none
,
23910 [target_big_endian
];
23917 fragP
->fr_var
= noop_size
;
23919 if (bytes
& (noop_size
- 1))
23921 fix
= bytes
& (noop_size
- 1);
23923 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
23925 memset (p
, 0, fix
);
23932 if (bytes
& noop_size
)
23934 /* Insert a narrow noop. */
23935 memcpy (p
, narrow_noop
, noop_size
);
23937 bytes
-= noop_size
;
23941 /* Use wide noops for the remainder */
23945 while (bytes
>= noop_size
)
23947 memcpy (p
, noop
, noop_size
);
23949 bytes
-= noop_size
;
23953 fragP
->fr_fix
+= fix
;
23956 /* Called from md_do_align. Used to create an alignment
23957 frag in a code section. */
23960 arm_frag_align_code (int n
, int max
)
23964 /* We assume that there will never be a requirement
23965 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
23966 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
23971 _("alignments greater than %d bytes not supported in .text sections."),
23972 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
23973 as_fatal ("%s", err_msg
);
23976 p
= frag_var (rs_align_code
,
23977 MAX_MEM_FOR_RS_ALIGN_CODE
,
23979 (relax_substateT
) max
,
23986 /* Perform target specific initialisation of a frag.
23987 Note - despite the name this initialisation is not done when the frag
23988 is created, but only when its type is assigned. A frag can be created
23989 and used a long time before its type is set, so beware of assuming that
23990 this initialisation is performed first. */
23994 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
23996 /* Record whether this frag is in an ARM or a THUMB area. */
23997 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
24000 #else /* OBJ_ELF is defined. */
24002 arm_init_frag (fragS
* fragP
, int max_chars
)
24004 bfd_boolean frag_thumb_mode
;
24006 /* If the current ARM vs THUMB mode has not already
24007 been recorded into this frag then do so now. */
24008 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
24009 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
24011 /* PR 21809: Do not set a mapping state for debug sections
24012 - it just confuses other tools. */
24013 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
24016 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
24018 /* Record a mapping symbol for alignment frags. We will delete this
24019 later if the alignment ends up empty. */
24020 switch (fragP
->fr_type
)
24023 case rs_align_test
:
24025 mapping_state_2 (MAP_DATA
, max_chars
);
24027 case rs_align_code
:
24028 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
24035 /* When we change sections we need to issue a new mapping symbol. */
24038 arm_elf_change_section (void)
24040 /* Link an unlinked unwind index table section to the .text section. */
24041 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
24042 && elf_linked_to_section (now_seg
) == NULL
)
24043 elf_linked_to_section (now_seg
) = text_section
;
24047 arm_elf_section_type (const char * str
, size_t len
)
24049 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
24050 return SHT_ARM_EXIDX
;
24055 /* Code to deal with unwinding tables. */
24057 static void add_unwind_adjustsp (offsetT
);
24059 /* Generate any deferred unwind frame offset. */
24062 flush_pending_unwind (void)
24066 offset
= unwind
.pending_offset
;
24067 unwind
.pending_offset
= 0;
24069 add_unwind_adjustsp (offset
);
24072 /* Add an opcode to this list for this function. Two-byte opcodes should
24073 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
24077 add_unwind_opcode (valueT op
, int length
)
24079 /* Add any deferred stack adjustment. */
24080 if (unwind
.pending_offset
)
24081 flush_pending_unwind ();
24083 unwind
.sp_restored
= 0;
24085 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
24087 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
24088 if (unwind
.opcodes
)
24089 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
24090 unwind
.opcode_alloc
);
24092 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
24097 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
24099 unwind
.opcode_count
++;
24103 /* Add unwind opcodes to adjust the stack pointer. */
24106 add_unwind_adjustsp (offsetT offset
)
24110 if (offset
> 0x200)
24112 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
24117 /* Long form: 0xb2, uleb128. */
24118 /* This might not fit in a word so add the individual bytes,
24119 remembering the list is built in reverse order. */
24120 o
= (valueT
) ((offset
- 0x204) >> 2);
24122 add_unwind_opcode (0, 1);
24124 /* Calculate the uleb128 encoding of the offset. */
24128 bytes
[n
] = o
& 0x7f;
24134 /* Add the insn. */
24136 add_unwind_opcode (bytes
[n
- 1], 1);
24137 add_unwind_opcode (0xb2, 1);
24139 else if (offset
> 0x100)
24141 /* Two short opcodes. */
24142 add_unwind_opcode (0x3f, 1);
24143 op
= (offset
- 0x104) >> 2;
24144 add_unwind_opcode (op
, 1);
24146 else if (offset
> 0)
24148 /* Short opcode. */
24149 op
= (offset
- 4) >> 2;
24150 add_unwind_opcode (op
, 1);
24152 else if (offset
< 0)
24155 while (offset
> 0x100)
24157 add_unwind_opcode (0x7f, 1);
24160 op
= ((offset
- 4) >> 2) | 0x40;
24161 add_unwind_opcode (op
, 1);
24165 /* Finish the list of unwind opcodes for this function. */
24168 finish_unwind_opcodes (void)
24172 if (unwind
.fp_used
)
24174 /* Adjust sp as necessary. */
24175 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
24176 flush_pending_unwind ();
24178 /* After restoring sp from the frame pointer. */
24179 op
= 0x90 | unwind
.fp_reg
;
24180 add_unwind_opcode (op
, 1);
24183 flush_pending_unwind ();
24187 /* Start an exception table entry. If idx is nonzero this is an index table
24191 start_unwind_section (const segT text_seg
, int idx
)
24193 const char * text_name
;
24194 const char * prefix
;
24195 const char * prefix_once
;
24196 const char * group_name
;
24204 prefix
= ELF_STRING_ARM_unwind
;
24205 prefix_once
= ELF_STRING_ARM_unwind_once
;
24206 type
= SHT_ARM_EXIDX
;
24210 prefix
= ELF_STRING_ARM_unwind_info
;
24211 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
24212 type
= SHT_PROGBITS
;
24215 text_name
= segment_name (text_seg
);
24216 if (streq (text_name
, ".text"))
24219 if (strncmp (text_name
, ".gnu.linkonce.t.",
24220 strlen (".gnu.linkonce.t.")) == 0)
24222 prefix
= prefix_once
;
24223 text_name
+= strlen (".gnu.linkonce.t.");
24226 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
24232 /* Handle COMDAT group. */
24233 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
24235 group_name
= elf_group_name (text_seg
);
24236 if (group_name
== NULL
)
24238 as_bad (_("Group section `%s' has no group signature"),
24239 segment_name (text_seg
));
24240 ignore_rest_of_line ();
24243 flags
|= SHF_GROUP
;
24247 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
24250 /* Set the section link for index tables. */
24252 elf_linked_to_section (now_seg
) = text_seg
;
24256 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
24257 personality routine data. Returns zero, or the index table value for
24258 an inline entry. */
24261 create_unwind_entry (int have_data
)
24266 /* The current word of data. */
24268 /* The number of bytes left in this word. */
24271 finish_unwind_opcodes ();
24273 /* Remember the current text section. */
24274 unwind
.saved_seg
= now_seg
;
24275 unwind
.saved_subseg
= now_subseg
;
24277 start_unwind_section (now_seg
, 0);
24279 if (unwind
.personality_routine
== NULL
)
24281 if (unwind
.personality_index
== -2)
24284 as_bad (_("handlerdata in cantunwind frame"));
24285 return 1; /* EXIDX_CANTUNWIND. */
24288 /* Use a default personality routine if none is specified. */
24289 if (unwind
.personality_index
== -1)
24291 if (unwind
.opcode_count
> 3)
24292 unwind
.personality_index
= 1;
24294 unwind
.personality_index
= 0;
24297 /* Space for the personality routine entry. */
24298 if (unwind
.personality_index
== 0)
24300 if (unwind
.opcode_count
> 3)
24301 as_bad (_("too many unwind opcodes for personality routine 0"));
24305 /* All the data is inline in the index table. */
24308 while (unwind
.opcode_count
> 0)
24310 unwind
.opcode_count
--;
24311 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
24315 /* Pad with "finish" opcodes. */
24317 data
= (data
<< 8) | 0xb0;
24324 /* We get two opcodes "free" in the first word. */
24325 size
= unwind
.opcode_count
- 2;
24329 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
24330 if (unwind
.personality_index
!= -1)
24332 as_bad (_("attempt to recreate an unwind entry"));
24336 /* An extra byte is required for the opcode count. */
24337 size
= unwind
.opcode_count
+ 1;
24340 size
= (size
+ 3) >> 2;
24342 as_bad (_("too many unwind opcodes"));
24344 frag_align (2, 0, 0);
24345 record_alignment (now_seg
, 2);
24346 unwind
.table_entry
= expr_build_dot ();
24348 /* Allocate the table entry. */
24349 ptr
= frag_more ((size
<< 2) + 4);
24350 /* PR 13449: Zero the table entries in case some of them are not used. */
24351 memset (ptr
, 0, (size
<< 2) + 4);
24352 where
= frag_now_fix () - ((size
<< 2) + 4);
24354 switch (unwind
.personality_index
)
24357 /* ??? Should this be a PLT generating relocation? */
24358 /* Custom personality routine. */
24359 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
24360 BFD_RELOC_ARM_PREL31
);
24365 /* Set the first byte to the number of additional words. */
24366 data
= size
> 0 ? size
- 1 : 0;
24370 /* ABI defined personality routines. */
24372 /* Three opcodes bytes are packed into the first word. */
24379 /* The size and first two opcode bytes go in the first word. */
24380 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
24385 /* Should never happen. */
24389 /* Pack the opcodes into words (MSB first), reversing the list at the same
24391 while (unwind
.opcode_count
> 0)
24395 md_number_to_chars (ptr
, data
, 4);
24400 unwind
.opcode_count
--;
24402 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
24405 /* Finish off the last word. */
24408 /* Pad with "finish" opcodes. */
24410 data
= (data
<< 8) | 0xb0;
24412 md_number_to_chars (ptr
, data
, 4);
24417 /* Add an empty descriptor if there is no user-specified data. */
24418 ptr
= frag_more (4);
24419 md_number_to_chars (ptr
, 0, 4);
24426 /* Initialize the DWARF-2 unwind information for this procedure. */
24429 tc_arm_frame_initial_instructions (void)
24431 cfi_add_CFA_def_cfa (REG_SP
, 0);
24433 #endif /* OBJ_ELF */
24435 /* Convert REGNAME to a DWARF-2 register number. */
24438 tc_arm_regname_to_dw2regnum (char *regname
)
24440 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
24444 /* PR 16694: Allow VFP registers as well. */
24445 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
24449 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
24458 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
24462 exp
.X_op
= O_secrel
;
24463 exp
.X_add_symbol
= symbol
;
24464 exp
.X_add_number
= 0;
24465 emit_expr (&exp
, size
);
24469 /* MD interface: Symbol and relocation handling. */
24471 /* Return the address within the segment that a PC-relative fixup is
24472 relative to. For ARM, PC-relative fixups applied to instructions
24473 are generally relative to the location of the fixup plus 8 bytes.
24474 Thumb branches are offset by 4, and Thumb loads relative to PC
24475 require special handling. */
24478 md_pcrel_from_section (fixS
* fixP
, segT seg
)
24480 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24482 /* If this is pc-relative and we are going to emit a relocation
24483 then we just want to put out any pipeline compensation that the linker
24484 will need. Otherwise we want to use the calculated base.
24485 For WinCE we skip the bias for externals as well, since this
24486 is how the MS ARM-CE assembler behaves and we want to be compatible. */
24488 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24489 || (arm_force_relocation (fixP
)
24491 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
24497 switch (fixP
->fx_r_type
)
24499 /* PC relative addressing on the Thumb is slightly odd as the
24500 bottom two bits of the PC are forced to zero for the
24501 calculation. This happens *after* application of the
24502 pipeline offset. However, Thumb adrl already adjusts for
24503 this, so we need not do it again. */
24504 case BFD_RELOC_ARM_THUMB_ADD
:
24507 case BFD_RELOC_ARM_THUMB_OFFSET
:
24508 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24509 case BFD_RELOC_ARM_T32_ADD_PC12
:
24510 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24511 return (base
+ 4) & ~3;
24513 /* Thumb branches are simply offset by +4. */
24514 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
24515 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24516 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24517 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24518 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24519 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24520 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
24521 case BFD_RELOC_ARM_THUMB_BF17
:
24522 case BFD_RELOC_ARM_THUMB_BF19
:
24523 case BFD_RELOC_ARM_THUMB_BF13
:
24524 case BFD_RELOC_ARM_THUMB_LOOP12
:
24527 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24529 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24530 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24531 && ARM_IS_FUNC (fixP
->fx_addsy
)
24532 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24533 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24536 /* BLX is like branches above, but forces the low two bits of PC to
24538 case BFD_RELOC_THUMB_PCREL_BLX
:
24540 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24541 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24542 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24543 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24544 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24545 return (base
+ 4) & ~3;
24547 /* ARM mode branches are offset by +8. However, the Windows CE
24548 loader expects the relocation not to take this into account. */
24549 case BFD_RELOC_ARM_PCREL_BLX
:
24551 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24552 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24553 && ARM_IS_FUNC (fixP
->fx_addsy
)
24554 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24555 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24558 case BFD_RELOC_ARM_PCREL_CALL
:
24560 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24561 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24562 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24563 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24564 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24567 case BFD_RELOC_ARM_PCREL_BRANCH
:
24568 case BFD_RELOC_ARM_PCREL_JUMP
:
24569 case BFD_RELOC_ARM_PLT32
:
24571 /* When handling fixups immediately, because we have already
24572 discovered the value of a symbol, or the address of the frag involved
24573 we must account for the offset by +8, as the OS loader will never see the reloc.
24574 see fixup_segment() in write.c
24575 The S_IS_EXTERNAL test handles the case of global symbols.
24576 Those need the calculated base, not just the pipe compensation the linker will need. */
24578 && fixP
->fx_addsy
!= NULL
24579 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24580 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
24588 /* ARM mode loads relative to PC are also offset by +8. Unlike
24589 branches, the Windows CE loader *does* expect the relocation
24590 to take this into account. */
24591 case BFD_RELOC_ARM_OFFSET_IMM
:
24592 case BFD_RELOC_ARM_OFFSET_IMM8
:
24593 case BFD_RELOC_ARM_HWLITERAL
:
24594 case BFD_RELOC_ARM_LITERAL
:
24595 case BFD_RELOC_ARM_CP_OFF_IMM
:
24599 /* Other PC-relative relocations are un-offset. */
24605 static bfd_boolean flag_warn_syms
= TRUE
;
/* Gas hook: called when an assignment such as "LDR = foo" is parsed, to warn
   (when flag_warn_syms is set) if the symbol name matches an ARM mnemonic in
   arm_ops_hsh.  NOTE(review): lossy extraction -- the fused original line
   numbers jump (…24614 -> 24618, 24630 -> 24632…), so several lines are
   missing here: the example text, the lower-casing loop body (presumably
   "*p = TOLOWER (*p)" -- confirm against the full file), braces, and the
   trailing cleanup/return.  */
24608 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
24610 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24611 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24612 does mean that the resulting code might be very confusing to the reader.
24613 Also this warning can be triggered if the user omits an operand before
24614 an immediate address, eg:
24618 GAS treats this as an assignment of the value of the symbol foo to a
24619 symbol LDR, and so (without this code) it will not issue any kind of
24620 warning or error message.
24622 Note - ARM instructions are case-insensitive but the strings in the hash
24623 table are all stored in lower case, so we must first ensure that name is
24625 if (flag_warn_syms
&& arm_ops_hsh
)
/* Work on a private copy of the name so it can be lower-cased for the
   hash-table lookup without mutating the caller's string.  */
24627 char * nbuf
= strdup (name
)
24630 for (p
= nbuf
; *p
; p
++)
24632 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
/* Lazily-created set of names already reported, so each clashing symbol
   is warned about only once.  */
24634 static struct hash_control
* already_warned
= NULL
;
24636 if (already_warned
== NULL
)
24637 already_warned
= hash_new ();
24638 /* Only warn about the symbol once. To keep the code
24639 simple we let hash_insert do the lookup for us. */
24640 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
24641 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
24650 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24651 Otherwise we have no need to default values of symbols. */
/* md_undefined_symbol: gas hook giving the backend a chance to synthesize a
   symbol for an otherwise-undefined name; here only the GOT symbol is
   special-cased.  NOTE(review): lossy extraction -- braces and the return
   statements are not visible in this chunk.  */
24654 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
/* Cheap two-character prefix test before the full string comparison.  */
24657 if (name
[0] == '_' && name
[1] == 'G'
24658 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
24662 if (symbol_find (name
))
24663 as_bad (_("GOT already in the symbol table"));
/* Create the GOT symbol lazily: value 0 in the undefined section.  */
24665 GOT_symbol
= symbol_new (name
, undefined_section
,
24666 (valueT
) 0, & zero_address_frag
);
24676 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24677 computed as two separate immediate values, added together. We
24678 already know that this value cannot be computed by just one ARM
24681 static unsigned int
/* On success, returns the low-part encoding ((a & 0xff) | (i << 7)) and
   stores the high part's encoding through HIGHPART; each is a byte plus a
   rotation field built from the even rotation count i.  NOTE(review):
   lossy extraction -- the declarations of i and a, braces, and the failure
   return path are not visible here.  */
24682 validate_immediate_twopart (unsigned int val
,
24683 unsigned int * highpart
)
/* Try each even rotation (ARM immediates only allow even rotate amounts)
   until the rotated value has a non-zero low byte.  */
24688 for (i
= 0; i
< 32; i
+= 2)
24689 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
24695 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
24697 else if (a
& 0xff0000)
24699 if (a
& 0xff000000)
24701 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
/* Only the top byte can remain at this point.  */
24705 gas_assert (a
& 0xff000000);
24706 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
/* Low part: bottom byte of the rotated value plus its rotation count.  */
24709 return (a
& 0xff) | (i
<< 7);
/* Range-check an ARM load/store offset immediate: when HWSE is non-zero the
   (halfword/signed-extend form) limit is 255, otherwise 4095 (12 bits).
   NOTE(review): lossy extraction -- the return type and the FAIL/success
   return statements are not visible here; presumably returns FAIL on
   overflow and VAL otherwise -- confirm against the full file.  */
24716 validate_offset_imm (unsigned int val
, int hwse
)
24718 if ((hwse
&& val
> 255) || val
> 4095)
24723 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24724 negative immediate constant by altering the instruction. A bit of
24729 by inverting the second operand, and
24732 by negating the second operand. */
/* NOTE(review): lossy extraction -- the return type, the switch head on
   "op", break statements, the default case, and the code that chooses
   between NEGATED and INVERTED as the new VALUE are not visible in this
   chunk.  */
24735 negate_data_op (unsigned long * instruction
,
24736 unsigned long value
)
24739 unsigned long negated
, inverted
;
/* Pre-compute both alternative immediates: the arithmetic negation and the
   bitwise inversion of VALUE, as ARM modified-immediate encodings.  */
24741 negated
= encode_arm_immediate (-value
);
24742 inverted
= encode_arm_immediate (~value
);
/* Extract the 4-bit data-processing opcode from the instruction word.  */
24744 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
24747 /* First negates. */
24748 case OPCODE_SUB
: /* ADD <-> SUB */
24749 new_inst
= OPCODE_ADD
;
24754 new_inst
= OPCODE_SUB
;
24758 case OPCODE_CMP
: /* CMP <-> CMN */
24759 new_inst
= OPCODE_CMN
;
24764 new_inst
= OPCODE_CMP
;
24768 /* Now Inverted ops. */
24769 case OPCODE_MOV
: /* MOV <-> MVN */
24770 new_inst
= OPCODE_MVN
;
24775 new_inst
= OPCODE_MOV
;
24779 case OPCODE_AND
: /* AND <-> BIC */
24780 new_inst
= OPCODE_BIC
;
24785 new_inst
= OPCODE_AND
;
24789 case OPCODE_ADC
: /* ADC <-> SBC */
24790 new_inst
= OPCODE_SBC
;
24795 new_inst
= OPCODE_ADC
;
24799 /* We cannot do anything. */
/* If the alternative encoding also failed, give up (VALUE appears to have
   been replaced by the chosen encoding in lines not visible here).  */
24804 if (value
== (unsigned) FAIL
)
/* Splice the replacement opcode into the instruction word.  */
24807 *instruction
&= OPCODE_MASK
;
24808 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
24812 /* Like negate_data_op, but for Thumb-2. */
/* Swaps a Thumb-2 data-processing opcode for its negated/inverted-operand
   counterpart so an unencodable immediate can still be used.
   NOTE(review): lossy extraction -- the rd/op/new_inst declarations, the
   switch head, break statements, the default case, and the selection
   between NEGATED and INVERTED are not visible in this chunk.  */
24814 static unsigned int
24815 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
24819 unsigned int negated
, inverted
;
/* Pre-compute both alternative Thumb-2 modified immediates.  */
24821 negated
= encode_thumb32_immediate (-value
);
24822 inverted
= encode_thumb32_immediate (~value
);
/* Destination register (bits 8..11) and 4-bit opcode field.  */
24824 rd
= (*instruction
>> 8) & 0xf;
24825 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
24828 /* ADD <-> SUB. Includes CMP <-> CMN. */
24829 case T2_OPCODE_SUB
:
24830 new_inst
= T2_OPCODE_ADD
;
24834 case T2_OPCODE_ADD
:
24835 new_inst
= T2_OPCODE_SUB
;
24839 /* ORR <-> ORN. Includes MOV <-> MVN. */
24840 case T2_OPCODE_ORR
:
24841 new_inst
= T2_OPCODE_ORN
;
24845 case T2_OPCODE_ORN
:
24846 new_inst
= T2_OPCODE_ORR
;
24850 /* AND <-> BIC. TST has no inverted equivalent. */
24851 case T2_OPCODE_AND
:
24852 new_inst
= T2_OPCODE_BIC
;
24859 case T2_OPCODE_BIC
:
24860 new_inst
= T2_OPCODE_AND
;
24865 case T2_OPCODE_ADC
:
24866 new_inst
= T2_OPCODE_SBC
;
24870 case T2_OPCODE_SBC
:
24871 new_inst
= T2_OPCODE_ADC
;
24875 /* We cannot do anything. */
/* Alternative encoding also failed: give up.  */
24880 if (value
== (unsigned int)FAIL
)
/* Write the replacement opcode back into the instruction.  */
24883 *instruction
&= T2_OPCODE_MASK
;
24884 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
24888 /* Read a 32-bit thumb instruction from buf. */
24890 static unsigned long
24891 get_thumb32_insn (char * buf
)
24893 unsigned long insn
;
/* A Thumb-2 instruction is stored as two consecutive 16-bit halfwords;
   the first halfword forms the most-significant half of the value.  */
24894 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
24895 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
/* NOTE(review): the trailing "return insn;" is not visible in this
   extraction.  */
24900 /* We usually want to set the low bit on the address of thumb function
24901 symbols. In particular .word foo - . should have the low bit set.
24902 Generic code tries to fold the difference of two symbols to
24903 a constant. Prevent this and force a relocation when the first symbols
24904 is a thumb function. */
/* arm_optimize_expr: gas hook run during expression folding.  For
   <thumb-func> - <symbol>, rewrite L in place as a symbolic O_subtract so
   a relocation is emitted instead of a folded constant.  NOTE(review):
   lossy extraction -- the return type, braces, and the return statements
   ("handled" vs "process as normal") are not visible here.  */
24907 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
24909 if (op
== O_subtract
24910 && l
->X_op
== O_symbol
24911 && r
->X_op
== O_symbol
24912 && THUMB_IS_FUNC (l
->X_add_symbol
))
/* Keep the subtraction symbolic: move R's symbol into L's op-symbol slot
   and fold only the constant parts.  */
24914 l
->X_op
= O_subtract
;
24915 l
->X_op_symbol
= r
->X_add_symbol
;
24916 l
->X_add_number
-= r
->X_add_number
;
24920 /* Process as normal. */
24924 /* Encode Thumb2 unconditional branches and calls. The encoding
24925 for the 2 are identical for the immediate values. */
/* Packs the signed branch offset VALUE into the two 16-bit halfwords at
   BUF using the Thumb-2 B/BL fields: S (bit 24 of the offset), I1/I2,
   imm10 (hi) and imm11 (lo).  NOTE(review): lossy extraction -- the
   function's return type and the newval/newval2 declarations are not
   visible here.  */
24928 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
/* Positions of the J1/J2 bits in the second halfword.  */
24930 #define T2I1I2MASK ((1 << 13) | (1 << 11))
24933 addressT S
, I1
, I2
, lo
, hi
;
/* Split the offset into the instruction's bit-fields.  */
24935 S
= (value
>> 24) & 0x01;
24936 I1
= (value
>> 23) & 0x01;
24937 I2
= (value
>> 22) & 0x01;
24938 hi
= (value
>> 12) & 0x3ff;
24939 lo
= (value
>> 1) & 0x7ff;
/* Read the existing halfwords and merge in the offset fields.  */
24940 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24941 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24942 newval
|= (S
<< 10) | hi
;
/* J1 = NOT(I1 XOR S), J2 = NOT(I2 XOR S): clear the two J bits, then XOR
   the assembled field with T2I1I2MASK to apply the inversion.  */
24943 newval2
&= ~T2I1I2MASK
;
24944 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
24945 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24946 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24950 md_apply_fix (fixS
* fixP
,
24954 offsetT value
= * valP
;
24956 unsigned int newimm
;
24957 unsigned long temp
;
24959 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
24961 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
24963 /* Note whether this will delete the relocation. */
24965 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
24968 /* On a 64-bit host, silently truncate 'value' to 32 bits for
24969 consistency with the behaviour on 32-bit hosts. Remember value
24971 value
&= 0xffffffff;
24972 value
^= 0x80000000;
24973 value
-= 0x80000000;
24976 fixP
->fx_addnumber
= value
;
24978 /* Same treatment for fixP->fx_offset. */
24979 fixP
->fx_offset
&= 0xffffffff;
24980 fixP
->fx_offset
^= 0x80000000;
24981 fixP
->fx_offset
-= 0x80000000;
24983 switch (fixP
->fx_r_type
)
24985 case BFD_RELOC_NONE
:
24986 /* This will need to go in the object file. */
24990 case BFD_RELOC_ARM_IMMEDIATE
:
24991 /* We claim that this fixup has been processed here,
24992 even if in fact we generate an error because we do
24993 not have a reloc for it, so tc_gen_reloc will reject it. */
24996 if (fixP
->fx_addsy
)
24998 const char *msg
= 0;
25000 if (! S_IS_DEFINED (fixP
->fx_addsy
))
25001 msg
= _("undefined symbol %s used as an immediate value");
25002 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
25003 msg
= _("symbol %s is in a different section");
25004 else if (S_IS_WEAK (fixP
->fx_addsy
))
25005 msg
= _("symbol %s is weak and may be overridden later");
25009 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25010 msg
, S_GET_NAME (fixP
->fx_addsy
));
25015 temp
= md_chars_to_number (buf
, INSN_SIZE
);
25017 /* If the offset is negative, we should use encoding A2 for ADR. */
25018 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
25019 newimm
= negate_data_op (&temp
, value
);
25022 newimm
= encode_arm_immediate (value
);
25024 /* If the instruction will fail, see if we can fix things up by
25025 changing the opcode. */
25026 if (newimm
== (unsigned int) FAIL
)
25027 newimm
= negate_data_op (&temp
, value
);
25028 /* MOV accepts both ARM modified immediate (A1 encoding) and
25029 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
25030 When disassembling, MOV is preferred when there is no encoding
25032 if (newimm
== (unsigned int) FAIL
25033 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
25034 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
25035 && !((temp
>> SBIT_SHIFT
) & 0x1)
25036 && value
>= 0 && value
<= 0xffff)
25038 /* Clear bits[23:20] to change encoding from A1 to A2. */
25039 temp
&= 0xff0fffff;
25040 /* Encoding high 4bits imm. Code below will encode the remaining
25042 temp
|= (value
& 0x0000f000) << 4;
25043 newimm
= value
& 0x00000fff;
25047 if (newimm
== (unsigned int) FAIL
)
25049 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25050 _("invalid constant (%lx) after fixup"),
25051 (unsigned long) value
);
25055 newimm
|= (temp
& 0xfffff000);
25056 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
25059 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
25061 unsigned int highpart
= 0;
25062 unsigned int newinsn
= 0xe1a00000; /* nop. */
25064 if (fixP
->fx_addsy
)
25066 const char *msg
= 0;
25068 if (! S_IS_DEFINED (fixP
->fx_addsy
))
25069 msg
= _("undefined symbol %s used as an immediate value");
25070 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
25071 msg
= _("symbol %s is in a different section");
25072 else if (S_IS_WEAK (fixP
->fx_addsy
))
25073 msg
= _("symbol %s is weak and may be overridden later");
25077 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25078 msg
, S_GET_NAME (fixP
->fx_addsy
));
25083 newimm
= encode_arm_immediate (value
);
25084 temp
= md_chars_to_number (buf
, INSN_SIZE
);
25086 /* If the instruction will fail, see if we can fix things up by
25087 changing the opcode. */
25088 if (newimm
== (unsigned int) FAIL
25089 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
25091 /* No ? OK - try using two ADD instructions to generate
25093 newimm
= validate_immediate_twopart (value
, & highpart
);
25095 /* Yes - then make sure that the second instruction is
25097 if (newimm
!= (unsigned int) FAIL
)
25099 /* Still No ? Try using a negated value. */
25100 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
25101 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
25102 /* Otherwise - give up. */
25105 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25106 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
25111 /* Replace the first operand in the 2nd instruction (which
25112 is the PC) with the destination register. We have
25113 already added in the PC in the first instruction and we
25114 do not want to do it again. */
25115 newinsn
&= ~ 0xf0000;
25116 newinsn
|= ((newinsn
& 0x0f000) << 4);
25119 newimm
|= (temp
& 0xfffff000);
25120 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
25122 highpart
|= (newinsn
& 0xfffff000);
25123 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
25127 case BFD_RELOC_ARM_OFFSET_IMM
:
25128 if (!fixP
->fx_done
&& seg
->use_rela_p
)
25130 /* Fall through. */
25132 case BFD_RELOC_ARM_LITERAL
:
25138 if (validate_offset_imm (value
, 0) == FAIL
)
25140 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
25141 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25142 _("invalid literal constant: pool needs to be closer"));
25144 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25145 _("bad immediate value for offset (%ld)"),
25150 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25152 newval
&= 0xfffff000;
25155 newval
&= 0xff7ff000;
25156 newval
|= value
| (sign
? INDEX_UP
: 0);
25158 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25161 case BFD_RELOC_ARM_OFFSET_IMM8
:
25162 case BFD_RELOC_ARM_HWLITERAL
:
25168 if (validate_offset_imm (value
, 1) == FAIL
)
25170 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
25171 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25172 _("invalid literal constant: pool needs to be closer"));
25174 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25175 _("bad immediate value for 8-bit offset (%ld)"),
25180 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25182 newval
&= 0xfffff0f0;
25185 newval
&= 0xff7ff0f0;
25186 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
25188 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25191 case BFD_RELOC_ARM_T32_OFFSET_U8
:
25192 if (value
< 0 || value
> 1020 || value
% 4 != 0)
25193 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25194 _("bad immediate value for offset (%ld)"), (long) value
);
25197 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
25199 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
25202 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
25203 /* This is a complicated relocation used for all varieties of Thumb32
25204 load/store instruction with immediate offset:
25206 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
25207 *4, optional writeback(W)
25208 (doubleword load/store)
25210 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
25211 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
25212 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
25213 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
25214 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
25216 Uppercase letters indicate bits that are already encoded at
25217 this point. Lowercase letters are our problem. For the
25218 second block of instructions, the secondary opcode nybble
25219 (bits 8..11) is present, and bit 23 is zero, even if this is
25220 a PC-relative operation. */
25221 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25223 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
25225 if ((newval
& 0xf0000000) == 0xe0000000)
25227 /* Doubleword load/store: 8-bit offset, scaled by 4. */
25229 newval
|= (1 << 23);
25232 if (value
% 4 != 0)
25234 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25235 _("offset not a multiple of 4"));
25241 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25242 _("offset out of range"));
25247 else if ((newval
& 0x000f0000) == 0x000f0000)
25249 /* PC-relative, 12-bit offset. */
25251 newval
|= (1 << 23);
25256 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25257 _("offset out of range"));
25262 else if ((newval
& 0x00000100) == 0x00000100)
25264 /* Writeback: 8-bit, +/- offset. */
25266 newval
|= (1 << 9);
25271 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25272 _("offset out of range"));
25277 else if ((newval
& 0x00000f00) == 0x00000e00)
25279 /* T-instruction: positive 8-bit offset. */
25280 if (value
< 0 || value
> 0xff)
25282 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25283 _("offset out of range"));
25291 /* Positive 12-bit or negative 8-bit offset. */
25295 newval
|= (1 << 23);
25305 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25306 _("offset out of range"));
25313 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
25314 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
25317 case BFD_RELOC_ARM_SHIFT_IMM
:
25318 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25319 if (((unsigned long) value
) > 32
25321 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
25323 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25324 _("shift expression is too large"));
25329 /* Shifts of zero must be done as lsl. */
25331 else if (value
== 32)
25333 newval
&= 0xfffff07f;
25334 newval
|= (value
& 0x1f) << 7;
25335 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25338 case BFD_RELOC_ARM_T32_IMMEDIATE
:
25339 case BFD_RELOC_ARM_T32_ADD_IMM
:
25340 case BFD_RELOC_ARM_T32_IMM12
:
25341 case BFD_RELOC_ARM_T32_ADD_PC12
:
25342 /* We claim that this fixup has been processed here,
25343 even if in fact we generate an error because we do
25344 not have a reloc for it, so tc_gen_reloc will reject it. */
25348 && ! S_IS_DEFINED (fixP
->fx_addsy
))
25350 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25351 _("undefined symbol %s used as an immediate value"),
25352 S_GET_NAME (fixP
->fx_addsy
));
25356 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25358 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
25361 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25362 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
25363 Thumb2 modified immediate encoding (T2). */
25364 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
25365 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
25367 newimm
= encode_thumb32_immediate (value
);
25368 if (newimm
== (unsigned int) FAIL
)
25369 newimm
= thumb32_negate_data_op (&newval
, value
);
25371 if (newimm
== (unsigned int) FAIL
)
25373 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
25375 /* Turn add/sum into addw/subw. */
25376 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
25377 newval
= (newval
& 0xfeffffff) | 0x02000000;
25378 /* No flat 12-bit imm encoding for addsw/subsw. */
25379 if ((newval
& 0x00100000) == 0)
25381 /* 12 bit immediate for addw/subw. */
25385 newval
^= 0x00a00000;
25388 newimm
= (unsigned int) FAIL
;
25395 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
25396 UINT16 (T3 encoding), MOVW only accepts UINT16. When
25397 disassembling, MOV is preferred when there is no encoding
25399 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
25400 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
25401 but with the Rn field [19:16] set to 1111. */
25402 && (((newval
>> 16) & 0xf) == 0xf)
25403 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
25404 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
25405 && value
>= 0 && value
<= 0xffff)
25407 /* Toggle bit[25] to change encoding from T2 to T3. */
25409 /* Clear bits[19:16]. */
25410 newval
&= 0xfff0ffff;
25411 /* Encoding high 4bits imm. Code below will encode the
25412 remaining low 12bits. */
25413 newval
|= (value
& 0x0000f000) << 4;
25414 newimm
= value
& 0x00000fff;
25419 if (newimm
== (unsigned int)FAIL
)
25421 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25422 _("invalid constant (%lx) after fixup"),
25423 (unsigned long) value
);
25427 newval
|= (newimm
& 0x800) << 15;
25428 newval
|= (newimm
& 0x700) << 4;
25429 newval
|= (newimm
& 0x0ff);
25431 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
25432 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
25435 case BFD_RELOC_ARM_SMC
:
25436 if (((unsigned long) value
) > 0xffff)
25437 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25438 _("invalid smc expression"));
25439 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25440 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25441 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25444 case BFD_RELOC_ARM_HVC
:
25445 if (((unsigned long) value
) > 0xffff)
25446 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25447 _("invalid hvc expression"));
25448 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25449 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25450 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25453 case BFD_RELOC_ARM_SWI
:
25454 if (fixP
->tc_fix_data
!= 0)
25456 if (((unsigned long) value
) > 0xff)
25457 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25458 _("invalid swi expression"));
25459 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25461 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25465 if (((unsigned long) value
) > 0x00ffffff)
25466 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25467 _("invalid swi expression"));
25468 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25470 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25474 case BFD_RELOC_ARM_MULTI
:
25475 if (((unsigned long) value
) > 0xffff)
25476 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25477 _("invalid expression in load/store multiple"));
25478 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
25479 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25483 case BFD_RELOC_ARM_PCREL_CALL
:
25485 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25487 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25488 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25489 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25490 /* Flip the bl to blx. This is a simple flip
25491 bit here because we generate PCREL_CALL for
25492 unconditional bls. */
25494 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25495 newval
= newval
| 0x10000000;
25496 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25502 goto arm_branch_common
;
25504 case BFD_RELOC_ARM_PCREL_JUMP
:
25505 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25507 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25508 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25509 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25511 /* This would map to a bl<cond>, b<cond>,
25512 b<always> to a Thumb function. We
25513 need to force a relocation for this particular
25515 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25518 /* Fall through. */
25520 case BFD_RELOC_ARM_PLT32
:
25522 case BFD_RELOC_ARM_PCREL_BRANCH
:
25524 goto arm_branch_common
;
25526 case BFD_RELOC_ARM_PCREL_BLX
:
25529 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25531 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25532 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25533 && ARM_IS_FUNC (fixP
->fx_addsy
))
25535 /* Flip the blx to a bl and warn. */
25536 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25537 newval
= 0xeb000000;
25538 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25539 _("blx to '%s' an ARM ISA state function changed to bl"),
25541 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25547 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25548 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
25552 /* We are going to store value (shifted right by two) in the
25553 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25554 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25557 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25558 _("misaligned branch destination"));
25559 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
25560 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
25561 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25563 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25565 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25566 newval
|= (value
>> 2) & 0x00ffffff;
25567 /* Set the H bit on BLX instructions. */
25571 newval
|= 0x01000000;
25573 newval
&= ~0x01000000;
25575 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25579 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
25580 /* CBZ can only branch forward. */
25582 /* Attempts to use CBZ to branch to the next instruction
25583 (which, strictly speaking, are prohibited) will be turned into
25586 FIXME: It may be better to remove the instruction completely and
25587 perform relaxation. */
25590 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25591 newval
= 0xbf00; /* NOP encoding T1 */
25592 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25597 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25599 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25601 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25602 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
25603 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25608 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
25609 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
25610 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25612 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25614 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25615 newval
|= (value
& 0x1ff) >> 1;
25616 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25620 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
25621 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
25622 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25624 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25626 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25627 newval
|= (value
& 0xfff) >> 1;
25628 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25632 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25634 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25635 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25636 && ARM_IS_FUNC (fixP
->fx_addsy
)
25637 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25639 /* Force a relocation for a branch 20 bits wide. */
25642 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
25643 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25644 _("conditional branch out of range"));
25646 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25649 addressT S
, J1
, J2
, lo
, hi
;
25651 S
= (value
& 0x00100000) >> 20;
25652 J2
= (value
& 0x00080000) >> 19;
25653 J1
= (value
& 0x00040000) >> 18;
25654 hi
= (value
& 0x0003f000) >> 12;
25655 lo
= (value
& 0x00000ffe) >> 1;
25657 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25658 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25659 newval
|= (S
<< 10) | hi
;
25660 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
25661 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25662 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25666 case BFD_RELOC_THUMB_PCREL_BLX
:
25667 /* If there is a blx from a thumb state function to
25668 another thumb function flip this to a bl and warn
25672 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25673 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25674 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25676 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25677 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25678 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25680 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25681 newval
= newval
| 0x1000;
25682 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25683 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25688 goto thumb_bl_common
;
25690 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25691 /* A bl from Thumb state ISA to an internal ARM state function
25692 is converted to a blx. */
25694 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25695 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25696 && ARM_IS_FUNC (fixP
->fx_addsy
)
25697 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25699 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25700 newval
= newval
& ~0x1000;
25701 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25702 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
25708 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25709 /* For a BLX instruction, make sure that the relocation is rounded up
25710 to a word boundary. This follows the semantics of the instruction
25711 which specifies that bit 1 of the target address will come from bit
25712 1 of the base address. */
25713 value
= (value
+ 3) & ~ 3;
25716 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
25717 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25718 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25721 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
25723 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
25724 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25725 else if ((value
& ~0x1ffffff)
25726 && ((value
& ~0x1ffffff) != ~0x1ffffff))
25727 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25728 _("Thumb2 branch out of range"));
25731 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25732 encode_thumb2_b_bl_offset (buf
, value
);
25736 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25737 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
25738 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25740 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25741 encode_thumb2_b_bl_offset (buf
, value
);
25746 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25751 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25752 md_number_to_chars (buf
, value
, 2);
25756 case BFD_RELOC_ARM_TLS_CALL
:
25757 case BFD_RELOC_ARM_THM_TLS_CALL
:
25758 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25759 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25760 case BFD_RELOC_ARM_TLS_GOTDESC
:
25761 case BFD_RELOC_ARM_TLS_GD32
:
25762 case BFD_RELOC_ARM_TLS_LE32
:
25763 case BFD_RELOC_ARM_TLS_IE32
:
25764 case BFD_RELOC_ARM_TLS_LDM32
:
25765 case BFD_RELOC_ARM_TLS_LDO32
:
25766 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25769 /* Same handling as above, but with the arm_fdpic guard. */
25770 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25771 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25772 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25775 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25779 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25780 _("Relocation supported only in FDPIC mode"));
25784 case BFD_RELOC_ARM_GOT32
:
25785 case BFD_RELOC_ARM_GOTOFF
:
25788 case BFD_RELOC_ARM_GOT_PREL
:
25789 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25790 md_number_to_chars (buf
, value
, 4);
25793 case BFD_RELOC_ARM_TARGET2
:
25794 /* TARGET2 is not partial-inplace, so we need to write the
25795 addend here for REL targets, because it won't be written out
25796 during reloc processing later. */
25797 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25798 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
25801 /* Relocations for FDPIC. */
25802 case BFD_RELOC_ARM_GOTFUNCDESC
:
25803 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25804 case BFD_RELOC_ARM_FUNCDESC
:
25807 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25808 md_number_to_chars (buf
, 0, 4);
25812 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25813 _("Relocation supported only in FDPIC mode"));
25818 case BFD_RELOC_RVA
:
25820 case BFD_RELOC_ARM_TARGET1
:
25821 case BFD_RELOC_ARM_ROSEGREL32
:
25822 case BFD_RELOC_ARM_SBREL32
:
25823 case BFD_RELOC_32_PCREL
:
25825 case BFD_RELOC_32_SECREL
:
25827 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25829 /* For WinCE we only do this for pcrel fixups. */
25830 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
25832 md_number_to_chars (buf
, value
, 4);
25836 case BFD_RELOC_ARM_PREL31
:
25837 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25839 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
25840 if ((value
^ (value
>> 1)) & 0x40000000)
25842 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25843 _("rel31 relocation overflow"));
25845 newval
|= value
& 0x7fffffff;
25846 md_number_to_chars (buf
, newval
, 4);
25851 case BFD_RELOC_ARM_CP_OFF_IMM
:
25852 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
25853 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
25854 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
25855 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25857 newval
= get_thumb32_insn (buf
);
25858 if ((newval
& 0x0f200f00) == 0x0d000900)
25860 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25861 has permitted values that are multiples of 2, in the range 0
25863 if (value
< -510 || value
> 510 || (value
& 1))
25864 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25865 _("co-processor offset out of range"));
25867 else if ((newval
& 0xfe001f80) == 0xec000f80)
25869 if (value
< -511 || value
> 512 || (value
& 3))
25870 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25871 _("co-processor offset out of range"));
25873 else if (value
< -1023 || value
> 1023 || (value
& 3))
25874 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25875 _("co-processor offset out of range"));
25880 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25881 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25882 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25884 newval
= get_thumb32_insn (buf
);
25887 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25888 newval
&= 0xffffff80;
25890 newval
&= 0xffffff00;
25894 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
25895 newval
&= 0xff7fff80;
25897 newval
&= 0xff7fff00;
25898 if ((newval
& 0x0f200f00) == 0x0d000900)
25900 /* This is a fp16 vstr/vldr.
25902 It requires the immediate offset in the instruction is shifted
25903 left by 1 to be a half-word offset.
25905 Here, left shift by 1 first, and later right shift by 2
25906 should get the right offset. */
25909 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
25911 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25912 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
25913 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25915 put_thumb32_insn (buf
, newval
);
25918 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
25919 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
25920 if (value
< -255 || value
> 255)
25921 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25922 _("co-processor offset out of range"));
25924 goto cp_off_common
;
25926 case BFD_RELOC_ARM_THUMB_OFFSET
:
25927 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25928 /* Exactly what ranges, and where the offset is inserted depends
25929 on the type of instruction, we can establish this from the
25931 switch (newval
>> 12)
25933 case 4: /* PC load. */
25934 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
25935 forced to zero for these loads; md_pcrel_from has already
25936 compensated for this. */
25938 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25939 _("invalid offset, target not word aligned (0x%08lX)"),
25940 (((unsigned long) fixP
->fx_frag
->fr_address
25941 + (unsigned long) fixP
->fx_where
) & ~3)
25942 + (unsigned long) value
);
25944 if (value
& ~0x3fc)
25945 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25946 _("invalid offset, value too big (0x%08lX)"),
25949 newval
|= value
>> 2;
25952 case 9: /* SP load/store. */
25953 if (value
& ~0x3fc)
25954 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25955 _("invalid offset, value too big (0x%08lX)"),
25957 newval
|= value
>> 2;
25960 case 6: /* Word load/store. */
25962 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25963 _("invalid offset, value too big (0x%08lX)"),
25965 newval
|= value
<< 4; /* 6 - 2. */
25968 case 7: /* Byte load/store. */
25970 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25971 _("invalid offset, value too big (0x%08lX)"),
25973 newval
|= value
<< 6;
25976 case 8: /* Halfword load/store. */
25978 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25979 _("invalid offset, value too big (0x%08lX)"),
25981 newval
|= value
<< 5; /* 6 - 1. */
25985 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25986 "Unable to process relocation for thumb opcode: %lx",
25987 (unsigned long) newval
);
25990 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25993 case BFD_RELOC_ARM_THUMB_ADD
:
25994 /* This is a complicated relocation, since we use it for all of
25995 the following immediate relocations:
25999 9bit ADD/SUB SP word-aligned
26000 10bit ADD PC/SP word-aligned
26002 The type of instruction being processed is encoded in the
26009 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26011 int rd
= (newval
>> 4) & 0xf;
26012 int rs
= newval
& 0xf;
26013 int subtract
= !!(newval
& 0x8000);
26015 /* Check for HI regs, only very restricted cases allowed:
26016 Adjusting SP, and using PC or SP to get an address. */
26017 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
26018 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
26019 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26020 _("invalid Hi register with immediate"));
26022 /* If value is negative, choose the opposite instruction. */
26026 subtract
= !subtract
;
26028 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26029 _("immediate value out of range"));
26034 if (value
& ~0x1fc)
26035 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26036 _("invalid immediate for stack address calculation"));
26037 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
26038 newval
|= value
>> 2;
26040 else if (rs
== REG_PC
|| rs
== REG_SP
)
26042 /* PR gas/18541. If the addition is for a defined symbol
26043 within range of an ADR instruction then accept it. */
26046 && fixP
->fx_addsy
!= NULL
)
26050 if (! S_IS_DEFINED (fixP
->fx_addsy
)
26051 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
26052 || S_IS_WEAK (fixP
->fx_addsy
))
26054 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26055 _("address calculation needs a strongly defined nearby symbol"));
26059 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
26061 /* Round up to the next 4-byte boundary. */
26066 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
26070 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26071 _("symbol too far away"));
26081 if (subtract
|| value
& ~0x3fc)
26082 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26083 _("invalid immediate for address calculation (value = 0x%08lX)"),
26084 (unsigned long) (subtract
? - value
: value
));
26085 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
26087 newval
|= value
>> 2;
26092 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26093 _("immediate value out of range"));
26094 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
26095 newval
|= (rd
<< 8) | value
;
26100 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26101 _("immediate value out of range"));
26102 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
26103 newval
|= rd
| (rs
<< 3) | (value
<< 6);
26106 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26109 case BFD_RELOC_ARM_THUMB_IMM
:
26110 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26111 if (value
< 0 || value
> 255)
26112 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26113 _("invalid immediate: %ld is out of range"),
26116 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26119 case BFD_RELOC_ARM_THUMB_SHIFT
:
26120 /* 5bit shift value (0..32). LSL cannot take 32. */
26121 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
26122 temp
= newval
& 0xf800;
26123 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
26124 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26125 _("invalid shift value: %ld"), (long) value
);
26126 /* Shifts of zero must be encoded as LSL. */
26128 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
26129 /* Shifts of 32 are encoded as zero. */
26130 else if (value
== 32)
26132 newval
|= value
<< 6;
26133 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26136 case BFD_RELOC_VTABLE_INHERIT
:
26137 case BFD_RELOC_VTABLE_ENTRY
:
26141 case BFD_RELOC_ARM_MOVW
:
26142 case BFD_RELOC_ARM_MOVT
:
26143 case BFD_RELOC_ARM_THUMB_MOVW
:
26144 case BFD_RELOC_ARM_THUMB_MOVT
:
26145 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26147 /* REL format relocations are limited to a 16-bit addend. */
26148 if (!fixP
->fx_done
)
26150 if (value
< -0x8000 || value
> 0x7fff)
26151 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26152 _("offset out of range"));
26154 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
26155 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
26160 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
26161 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
26163 newval
= get_thumb32_insn (buf
);
26164 newval
&= 0xfbf08f00;
26165 newval
|= (value
& 0xf000) << 4;
26166 newval
|= (value
& 0x0800) << 15;
26167 newval
|= (value
& 0x0700) << 4;
26168 newval
|= (value
& 0x00ff);
26169 put_thumb32_insn (buf
, newval
);
26173 newval
= md_chars_to_number (buf
, 4);
26174 newval
&= 0xfff0f000;
26175 newval
|= value
& 0x0fff;
26176 newval
|= (value
& 0xf000) << 4;
26177 md_number_to_chars (buf
, newval
, 4);
26182 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26183 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26184 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26185 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26186 gas_assert (!fixP
->fx_done
);
26189 bfd_boolean is_mov
;
26190 bfd_vma encoded_addend
= value
;
26192 /* Check that addend can be encoded in instruction. */
26193 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
26194 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26195 _("the offset 0x%08lX is not representable"),
26196 (unsigned long) encoded_addend
);
26198 /* Extract the instruction. */
26199 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
26200 is_mov
= (insn
& 0xf800) == 0x2000;
26205 if (!seg
->use_rela_p
)
26206 insn
|= encoded_addend
;
26212 /* Extract the instruction. */
26213 /* Encoding is the following
26218 /* The following conditions must be true :
26223 rd
= (insn
>> 4) & 0xf;
26225 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
26226 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26227 _("Unable to process relocation for thumb opcode: %lx"),
26228 (unsigned long) insn
);
26230 /* Encode as ADD immediate8 thumb 1 code. */
26231 insn
= 0x3000 | (rd
<< 8);
26233 /* Place the encoded addend into the first 8 bits of the
26235 if (!seg
->use_rela_p
)
26236 insn
|= encoded_addend
;
26239 /* Update the instruction. */
26240 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
26244 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26245 case BFD_RELOC_ARM_ALU_PC_G0
:
26246 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26247 case BFD_RELOC_ARM_ALU_PC_G1
:
26248 case BFD_RELOC_ARM_ALU_PC_G2
:
26249 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26250 case BFD_RELOC_ARM_ALU_SB_G0
:
26251 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26252 case BFD_RELOC_ARM_ALU_SB_G1
:
26253 case BFD_RELOC_ARM_ALU_SB_G2
:
26254 gas_assert (!fixP
->fx_done
);
26255 if (!seg
->use_rela_p
)
26258 bfd_vma encoded_addend
;
26259 bfd_vma addend_abs
= llabs (value
);
26261 /* Check that the absolute value of the addend can be
26262 expressed as an 8-bit constant plus a rotation. */
26263 encoded_addend
= encode_arm_immediate (addend_abs
);
26264 if (encoded_addend
== (unsigned int) FAIL
)
26265 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26266 _("the offset 0x%08lX is not representable"),
26267 (unsigned long) addend_abs
);
26269 /* Extract the instruction. */
26270 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26272 /* If the addend is positive, use an ADD instruction.
26273 Otherwise use a SUB. Take care not to destroy the S bit. */
26274 insn
&= 0xff1fffff;
26280 /* Place the encoded addend into the first 12 bits of the
26282 insn
&= 0xfffff000;
26283 insn
|= encoded_addend
;
26285 /* Update the instruction. */
26286 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26290 case BFD_RELOC_ARM_LDR_PC_G0
:
26291 case BFD_RELOC_ARM_LDR_PC_G1
:
26292 case BFD_RELOC_ARM_LDR_PC_G2
:
26293 case BFD_RELOC_ARM_LDR_SB_G0
:
26294 case BFD_RELOC_ARM_LDR_SB_G1
:
26295 case BFD_RELOC_ARM_LDR_SB_G2
:
26296 gas_assert (!fixP
->fx_done
);
26297 if (!seg
->use_rela_p
)
26300 bfd_vma addend_abs
= llabs (value
);
26302 /* Check that the absolute value of the addend can be
26303 encoded in 12 bits. */
26304 if (addend_abs
>= 0x1000)
26305 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26306 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
26307 (unsigned long) addend_abs
);
26309 /* Extract the instruction. */
26310 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26312 /* If the addend is negative, clear bit 23 of the instruction.
26313 Otherwise set it. */
26315 insn
&= ~(1 << 23);
26319 /* Place the absolute value of the addend into the first 12 bits
26320 of the instruction. */
26321 insn
&= 0xfffff000;
26322 insn
|= addend_abs
;
26324 /* Update the instruction. */
26325 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26329 case BFD_RELOC_ARM_LDRS_PC_G0
:
26330 case BFD_RELOC_ARM_LDRS_PC_G1
:
26331 case BFD_RELOC_ARM_LDRS_PC_G2
:
26332 case BFD_RELOC_ARM_LDRS_SB_G0
:
26333 case BFD_RELOC_ARM_LDRS_SB_G1
:
26334 case BFD_RELOC_ARM_LDRS_SB_G2
:
26335 gas_assert (!fixP
->fx_done
);
26336 if (!seg
->use_rela_p
)
26339 bfd_vma addend_abs
= llabs (value
);
26341 /* Check that the absolute value of the addend can be
26342 encoded in 8 bits. */
26343 if (addend_abs
>= 0x100)
26344 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26345 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
26346 (unsigned long) addend_abs
);
26348 /* Extract the instruction. */
26349 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26351 /* If the addend is negative, clear bit 23 of the instruction.
26352 Otherwise set it. */
26354 insn
&= ~(1 << 23);
26358 /* Place the first four bits of the absolute value of the addend
26359 into the first 4 bits of the instruction, and the remaining
26360 four into bits 8 .. 11. */
26361 insn
&= 0xfffff0f0;
26362 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
26364 /* Update the instruction. */
26365 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26369 case BFD_RELOC_ARM_LDC_PC_G0
:
26370 case BFD_RELOC_ARM_LDC_PC_G1
:
26371 case BFD_RELOC_ARM_LDC_PC_G2
:
26372 case BFD_RELOC_ARM_LDC_SB_G0
:
26373 case BFD_RELOC_ARM_LDC_SB_G1
:
26374 case BFD_RELOC_ARM_LDC_SB_G2
:
26375 gas_assert (!fixP
->fx_done
);
26376 if (!seg
->use_rela_p
)
26379 bfd_vma addend_abs
= llabs (value
);
26381 /* Check that the absolute value of the addend is a multiple of
26382 four and, when divided by four, fits in 8 bits. */
26383 if (addend_abs
& 0x3)
26384 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26385 _("bad offset 0x%08lX (must be word-aligned)"),
26386 (unsigned long) addend_abs
);
26388 if ((addend_abs
>> 2) > 0xff)
26389 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26390 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
26391 (unsigned long) addend_abs
);
26393 /* Extract the instruction. */
26394 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26396 /* If the addend is negative, clear bit 23 of the instruction.
26397 Otherwise set it. */
26399 insn
&= ~(1 << 23);
26403 /* Place the addend (divided by four) into the first eight
26404 bits of the instruction. */
26405 insn
&= 0xfffffff0;
26406 insn
|= addend_abs
>> 2;
26408 /* Update the instruction. */
26409 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26413 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26415 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26416 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26417 && ARM_IS_FUNC (fixP
->fx_addsy
)
26418 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26420 /* Force a relocation for a branch 5 bits wide. */
26423 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
26424 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26427 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26429 addressT boff
= value
>> 1;
26431 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26432 newval
|= (boff
<< 7);
26433 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26437 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26439 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26440 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26441 && ARM_IS_FUNC (fixP
->fx_addsy
)
26442 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26446 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
26447 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26448 _("branch out of range"));
26450 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26452 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26454 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
26455 addressT diff
= value
- boff
;
26459 newval
|= 1 << 1; /* T bit. */
26461 else if (diff
!= 2)
26463 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26464 _("out of range label-relative fixup value"));
26466 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26470 case BFD_RELOC_ARM_THUMB_BF17
:
26472 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26473 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26474 && ARM_IS_FUNC (fixP
->fx_addsy
)
26475 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26477 /* Force a relocation for a branch 17 bits wide. */
26481 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
26482 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26485 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26488 addressT immA
, immB
, immC
;
26490 immA
= (value
& 0x0001f000) >> 12;
26491 immB
= (value
& 0x00000ffc) >> 2;
26492 immC
= (value
& 0x00000002) >> 1;
26494 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26495 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26497 newval2
|= (immC
<< 11) | (immB
<< 1);
26498 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26499 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26503 case BFD_RELOC_ARM_THUMB_BF19
:
26505 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26506 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26507 && ARM_IS_FUNC (fixP
->fx_addsy
)
26508 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26510 /* Force a relocation for a branch 19 bits wide. */
26514 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
26515 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26518 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26521 addressT immA
, immB
, immC
;
26523 immA
= (value
& 0x0007f000) >> 12;
26524 immB
= (value
& 0x00000ffc) >> 2;
26525 immC
= (value
& 0x00000002) >> 1;
26527 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26528 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26530 newval2
|= (immC
<< 11) | (immB
<< 1);
26531 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26532 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26536 case BFD_RELOC_ARM_THUMB_BF13
:
26538 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26539 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26540 && ARM_IS_FUNC (fixP
->fx_addsy
)
26541 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26543 /* Force a relocation for a branch 13 bits wide. */
26547 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
26548 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26551 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26554 addressT immA
, immB
, immC
;
26556 immA
= (value
& 0x00001000) >> 12;
26557 immB
= (value
& 0x00000ffc) >> 2;
26558 immC
= (value
& 0x00000002) >> 1;
26560 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26561 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26563 newval2
|= (immC
<< 11) | (immB
<< 1);
26564 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26565 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26569 case BFD_RELOC_ARM_THUMB_LOOP12
:
26571 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26572 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26573 && ARM_IS_FUNC (fixP
->fx_addsy
)
26574 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26576 /* Force a relocation for a branch 12 bits wide. */
26580 bfd_vma insn
= get_thumb32_insn (buf
);
26581 /* le lr, <label> or le <label> */
26582 if (((insn
& 0xffffffff) == 0xf00fc001)
26583 || ((insn
& 0xffffffff) == 0xf02fc001))
26586 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
26587 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26589 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26591 addressT imml
, immh
;
26593 immh
= (value
& 0x00000ffc) >> 2;
26594 imml
= (value
& 0x00000002) >> 1;
26596 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26597 newval
|= (imml
<< 11) | (immh
<< 1);
26598 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
26602 case BFD_RELOC_ARM_V4BX
:
26603 /* This will need to go in the object file. */
26607 case BFD_RELOC_UNUSED
:
26609 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26610 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
26614 /* Translate internal representation of relocation info to BFD target
26618 tc_gen_reloc (asection
*section
, fixS
*fixp
)
26621 bfd_reloc_code_real_type code
;
26623 reloc
= XNEW (arelent
);
26625 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
26626 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
26627 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
26629 if (fixp
->fx_pcrel
)
26631 if (section
->use_rela_p
)
26632 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
26634 fixp
->fx_offset
= reloc
->address
;
26636 reloc
->addend
= fixp
->fx_offset
;
26638 switch (fixp
->fx_r_type
)
26641 if (fixp
->fx_pcrel
)
26643 code
= BFD_RELOC_8_PCREL
;
26646 /* Fall through. */
26649 if (fixp
->fx_pcrel
)
26651 code
= BFD_RELOC_16_PCREL
;
26654 /* Fall through. */
26657 if (fixp
->fx_pcrel
)
26659 code
= BFD_RELOC_32_PCREL
;
26662 /* Fall through. */
26664 case BFD_RELOC_ARM_MOVW
:
26665 if (fixp
->fx_pcrel
)
26667 code
= BFD_RELOC_ARM_MOVW_PCREL
;
26670 /* Fall through. */
26672 case BFD_RELOC_ARM_MOVT
:
26673 if (fixp
->fx_pcrel
)
26675 code
= BFD_RELOC_ARM_MOVT_PCREL
;
26678 /* Fall through. */
26680 case BFD_RELOC_ARM_THUMB_MOVW
:
26681 if (fixp
->fx_pcrel
)
26683 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
26686 /* Fall through. */
26688 case BFD_RELOC_ARM_THUMB_MOVT
:
26689 if (fixp
->fx_pcrel
)
26691 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
26694 /* Fall through. */
26696 case BFD_RELOC_NONE
:
26697 case BFD_RELOC_ARM_PCREL_BRANCH
:
26698 case BFD_RELOC_ARM_PCREL_BLX
:
26699 case BFD_RELOC_RVA
:
26700 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
26701 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
26702 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
26703 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26704 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26705 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26706 case BFD_RELOC_VTABLE_ENTRY
:
26707 case BFD_RELOC_VTABLE_INHERIT
:
26709 case BFD_RELOC_32_SECREL
:
26711 code
= fixp
->fx_r_type
;
26714 case BFD_RELOC_THUMB_PCREL_BLX
:
26716 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
26717 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
26720 code
= BFD_RELOC_THUMB_PCREL_BLX
;
26723 case BFD_RELOC_ARM_LITERAL
:
26724 case BFD_RELOC_ARM_HWLITERAL
:
26725 /* If this is called then the a literal has
26726 been referenced across a section boundary. */
26727 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26728 _("literal referenced across section boundary"));
26732 case BFD_RELOC_ARM_TLS_CALL
:
26733 case BFD_RELOC_ARM_THM_TLS_CALL
:
26734 case BFD_RELOC_ARM_TLS_DESCSEQ
:
26735 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
26736 case BFD_RELOC_ARM_GOT32
:
26737 case BFD_RELOC_ARM_GOTOFF
:
26738 case BFD_RELOC_ARM_GOT_PREL
:
26739 case BFD_RELOC_ARM_PLT32
:
26740 case BFD_RELOC_ARM_TARGET1
:
26741 case BFD_RELOC_ARM_ROSEGREL32
:
26742 case BFD_RELOC_ARM_SBREL32
:
26743 case BFD_RELOC_ARM_PREL31
:
26744 case BFD_RELOC_ARM_TARGET2
:
26745 case BFD_RELOC_ARM_TLS_LDO32
:
26746 case BFD_RELOC_ARM_PCREL_CALL
:
26747 case BFD_RELOC_ARM_PCREL_JUMP
:
26748 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26749 case BFD_RELOC_ARM_ALU_PC_G0
:
26750 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26751 case BFD_RELOC_ARM_ALU_PC_G1
:
26752 case BFD_RELOC_ARM_ALU_PC_G2
:
26753 case BFD_RELOC_ARM_LDR_PC_G0
:
26754 case BFD_RELOC_ARM_LDR_PC_G1
:
26755 case BFD_RELOC_ARM_LDR_PC_G2
:
26756 case BFD_RELOC_ARM_LDRS_PC_G0
:
26757 case BFD_RELOC_ARM_LDRS_PC_G1
:
26758 case BFD_RELOC_ARM_LDRS_PC_G2
:
26759 case BFD_RELOC_ARM_LDC_PC_G0
:
26760 case BFD_RELOC_ARM_LDC_PC_G1
:
26761 case BFD_RELOC_ARM_LDC_PC_G2
:
26762 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26763 case BFD_RELOC_ARM_ALU_SB_G0
:
26764 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26765 case BFD_RELOC_ARM_ALU_SB_G1
:
26766 case BFD_RELOC_ARM_ALU_SB_G2
:
26767 case BFD_RELOC_ARM_LDR_SB_G0
:
26768 case BFD_RELOC_ARM_LDR_SB_G1
:
26769 case BFD_RELOC_ARM_LDR_SB_G2
:
26770 case BFD_RELOC_ARM_LDRS_SB_G0
:
26771 case BFD_RELOC_ARM_LDRS_SB_G1
:
26772 case BFD_RELOC_ARM_LDRS_SB_G2
:
26773 case BFD_RELOC_ARM_LDC_SB_G0
:
26774 case BFD_RELOC_ARM_LDC_SB_G1
:
26775 case BFD_RELOC_ARM_LDC_SB_G2
:
26776 case BFD_RELOC_ARM_V4BX
:
26777 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26778 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26779 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26780 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26781 case BFD_RELOC_ARM_GOTFUNCDESC
:
26782 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
26783 case BFD_RELOC_ARM_FUNCDESC
:
26784 case BFD_RELOC_ARM_THUMB_BF17
:
26785 case BFD_RELOC_ARM_THUMB_BF19
:
26786 case BFD_RELOC_ARM_THUMB_BF13
:
26787 code
= fixp
->fx_r_type
;
26790 case BFD_RELOC_ARM_TLS_GOTDESC
:
26791 case BFD_RELOC_ARM_TLS_GD32
:
26792 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
26793 case BFD_RELOC_ARM_TLS_LE32
:
26794 case BFD_RELOC_ARM_TLS_IE32
:
26795 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
26796 case BFD_RELOC_ARM_TLS_LDM32
:
26797 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
26798 /* BFD will include the symbol's address in the addend.
26799 But we don't want that, so subtract it out again here. */
26800 if (!S_IS_COMMON (fixp
->fx_addsy
))
26801 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
26802 code
= fixp
->fx_r_type
;
26806 case BFD_RELOC_ARM_IMMEDIATE
:
26807 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26808 _("internal relocation (type: IMMEDIATE) not fixed up"));
26811 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
26812 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26813 _("ADRL used for a symbol not defined in the same file"));
26816 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26817 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26818 case BFD_RELOC_ARM_THUMB_LOOP12
:
26819 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26820 _("%s used for a symbol not defined in the same file"),
26821 bfd_get_reloc_code_name (fixp
->fx_r_type
));
26824 case BFD_RELOC_ARM_OFFSET_IMM
:
26825 if (section
->use_rela_p
)
26827 code
= fixp
->fx_r_type
;
26831 if (fixp
->fx_addsy
!= NULL
26832 && !S_IS_DEFINED (fixp
->fx_addsy
)
26833 && S_IS_LOCAL (fixp
->fx_addsy
))
26835 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26836 _("undefined local label `%s'"),
26837 S_GET_NAME (fixp
->fx_addsy
));
26841 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26842 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
26849 switch (fixp
->fx_r_type
)
26851 case BFD_RELOC_NONE
: type
= "NONE"; break;
26852 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
26853 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
26854 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
26855 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
26856 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
26857 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
26858 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
26859 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
26860 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
26861 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
26862 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
26863 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
26864 default: type
= _("<unknown>"); break;
26866 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26867 _("cannot represent %s relocation in this object file format"),
26874 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
26876 && fixp
->fx_addsy
== GOT_symbol
)
26878 code
= BFD_RELOC_ARM_GOTPC
;
26879 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
26883 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
26885 if (reloc
->howto
== NULL
)
26887 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26888 _("cannot represent %s relocation in this object file format"),
26889 bfd_get_reloc_code_name (code
));
26893 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
26894 vtable entry to be used in the relocation's section offset. */
26895 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
26896 reloc
->address
= fixp
->fx_offset
;
26901 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
26904 cons_fix_new_arm (fragS
* frag
,
26908 bfd_reloc_code_real_type reloc
)
26913 FIXME: @@ Should look at CPU word size. */
26917 reloc
= BFD_RELOC_8
;
26920 reloc
= BFD_RELOC_16
;
26924 reloc
= BFD_RELOC_32
;
26927 reloc
= BFD_RELOC_64
;
26932 if (exp
->X_op
== O_secrel
)
26934 exp
->X_op
= O_symbol
;
26935 reloc
= BFD_RELOC_32_SECREL
;
26939 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
26942 #if defined (OBJ_COFF)
26944 arm_validate_fix (fixS
* fixP
)
26946 /* If the destination of the branch is a defined symbol which does not have
26947 the THUMB_FUNC attribute, then we must be calling a function which has
26948 the (interfacearm) attribute. We look for the Thumb entry point to that
26949 function and change the branch to refer to that function instead. */
26950 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BRANCH23
26951 && fixP
->fx_addsy
!= NULL
26952 && S_IS_DEFINED (fixP
->fx_addsy
)
26953 && ! THUMB_IS_FUNC (fixP
->fx_addsy
))
26955 fixP
->fx_addsy
= find_real_start (fixP
->fx_addsy
);
26962 arm_force_relocation (struct fix
* fixp
)
26964 #if defined (OBJ_COFF) && defined (TE_PE)
26965 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
26969 /* In case we have a call or a branch to a function in ARM ISA mode from
26970 a thumb function or vice-versa force the relocation. These relocations
26971 are cleared off for some cores that might have blx and simple transformations
26975 switch (fixp
->fx_r_type
)
26977 case BFD_RELOC_ARM_PCREL_JUMP
:
26978 case BFD_RELOC_ARM_PCREL_CALL
:
26979 case BFD_RELOC_THUMB_PCREL_BLX
:
26980 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
26984 case BFD_RELOC_ARM_PCREL_BLX
:
26985 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26986 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26987 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26988 if (ARM_IS_FUNC (fixp
->fx_addsy
))
26997 /* Resolve these relocations even if the symbol is extern or weak.
26998 Technically this is probably wrong due to symbol preemption.
26999 In practice these relocations do not have enough range to be useful
27000 at dynamic link time, and some code (e.g. in the Linux kernel)
27001 expects these references to be resolved. */
27002 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
27003 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
27004 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
27005 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
27006 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
27007 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
27008 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
27009 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
27010 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
27011 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
27012 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
27013 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
27014 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
27015 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
27018 /* Always leave these relocations for the linker. */
27019 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
27020 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
27021 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
27024 /* Always generate relocations against function symbols. */
27025 if (fixp
->fx_r_type
== BFD_RELOC_32
27027 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
27030 return generic_force_reloc (fixp
);
27033 #if defined (OBJ_ELF) || defined (OBJ_COFF)
27034 /* Relocations against function names must be left unadjusted,
27035 so that the linker can use this information to generate interworking
27036 stubs. The MIPS version of this function
27037 also prevents relocations that are mips-16 specific, but I do not
27038 know why it does this.
27041 There is one other problem that ought to be addressed here, but
27042 which currently is not: Taking the address of a label (rather
27043 than a function) and then later jumping to that address. Such
27044 addresses also ought to have their bottom bit set (assuming that
27045 they reside in Thumb code), but at the moment they will not. */
27048 arm_fix_adjustable (fixS
* fixP
)
27050 if (fixP
->fx_addsy
== NULL
)
27053 /* Preserve relocations against symbols with function type. */
27054 if (symbol_get_bfdsym (fixP
->fx_addsy
)->flags
& BSF_FUNCTION
)
27057 if (THUMB_IS_FUNC (fixP
->fx_addsy
)
27058 && fixP
->fx_subsy
== NULL
)
27061 /* We need the symbol name for the VTABLE entries. */
27062 if ( fixP
->fx_r_type
== BFD_RELOC_VTABLE_INHERIT
27063 || fixP
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
27066 /* Don't allow symbols to be discarded on GOT related relocs. */
27067 if (fixP
->fx_r_type
== BFD_RELOC_ARM_PLT32
27068 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOT32
27069 || fixP
->fx_r_type
== BFD_RELOC_ARM_GOTOFF
27070 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32
27071 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GD32_FDPIC
27072 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LE32
27073 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32
27074 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_IE32_FDPIC
27075 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32
27076 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDM32_FDPIC
27077 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_LDO32
27078 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_GOTDESC
27079 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_CALL
27080 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_CALL
27081 || fixP
->fx_r_type
== BFD_RELOC_ARM_TLS_DESCSEQ
27082 || fixP
->fx_r_type
== BFD_RELOC_ARM_THM_TLS_DESCSEQ
27083 || fixP
->fx_r_type
== BFD_RELOC_ARM_TARGET2
)
27086 /* Similarly for group relocations. */
27087 if ((fixP
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
27088 && fixP
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
27089 || fixP
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
27092 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
27093 if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW
27094 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
27095 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVW_PCREL
27096 || fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT_PCREL
27097 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
27098 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
27099 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW_PCREL
27100 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT_PCREL
)
27103 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
27104 offsets, so keep these symbols. */
27105 if (fixP
->fx_r_type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
27106 && fixP
->fx_r_type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
27111 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
27115 elf32_arm_target_format (void)
27118 return (target_big_endian
27119 ? "elf32-bigarm-symbian"
27120 : "elf32-littlearm-symbian");
27121 #elif defined (TE_VXWORKS)
27122 return (target_big_endian
27123 ? "elf32-bigarm-vxworks"
27124 : "elf32-littlearm-vxworks");
27125 #elif defined (TE_NACL)
27126 return (target_big_endian
27127 ? "elf32-bigarm-nacl"
27128 : "elf32-littlearm-nacl");
27132 if (target_big_endian
)
27133 return "elf32-bigarm-fdpic";
27135 return "elf32-littlearm-fdpic";
27139 if (target_big_endian
)
27140 return "elf32-bigarm";
27142 return "elf32-littlearm";
27148 armelf_frob_symbol (symbolS
* symp
,
27151 elf_frob_symbol (symp
, puntp
);
27155 /* MD interface: Finalization. */
27160 literal_pool
* pool
;
27162 /* Ensure that all the predication blocks are properly closed. */
27163 check_pred_blocks_finished ();
27165 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
27167 /* Put it at the end of the relevant section. */
27168 subseg_set (pool
->section
, pool
->sub_section
);
27170 arm_elf_change_section ();
27177 /* Remove any excess mapping symbols generated for alignment frags in
27178 SEC. We may have created a mapping symbol before a zero byte
27179 alignment; remove it if there's a mapping symbol after the
27182 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
27183 void *dummy ATTRIBUTE_UNUSED
)
27185 segment_info_type
*seginfo
= seg_info (sec
);
27188 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
27191 for (fragp
= seginfo
->frchainP
->frch_root
;
27193 fragp
= fragp
->fr_next
)
27195 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
27196 fragS
*next
= fragp
->fr_next
;
27198 /* Variable-sized frags have been converted to fixed size by
27199 this point. But if this was variable-sized to start with,
27200 there will be a fixed-size frag after it. So don't handle
27202 if (sym
== NULL
|| next
== NULL
)
27205 if (S_GET_VALUE (sym
) < next
->fr_address
)
27206 /* Not at the end of this frag. */
27208 know (S_GET_VALUE (sym
) == next
->fr_address
);
27212 if (next
->tc_frag_data
.first_map
!= NULL
)
27214 /* Next frag starts with a mapping symbol. Discard this
27216 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
27220 if (next
->fr_next
== NULL
)
27222 /* This mapping symbol is at the end of the section. Discard
27224 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
27225 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
27229 /* As long as we have empty frags without any mapping symbols,
27231 /* If the next frag is non-empty and does not start with a
27232 mapping symbol, then this mapping symbol is required. */
27233 if (next
->fr_address
!= next
->fr_next
->fr_address
)
27236 next
= next
->fr_next
;
27238 while (next
!= NULL
);
27243 /* Adjust the symbol table. This marks Thumb symbols as distinct from
27247 arm_adjust_symtab (void)
27252 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
27254 if (ARM_IS_THUMB (sym
))
27256 if (THUMB_IS_FUNC (sym
))
27258 /* Mark the symbol as a Thumb function. */
27259 if ( S_GET_STORAGE_CLASS (sym
) == C_STAT
27260 || S_GET_STORAGE_CLASS (sym
) == C_LABEL
) /* This can happen! */
27261 S_SET_STORAGE_CLASS (sym
, C_THUMBSTATFUNC
);
27263 else if (S_GET_STORAGE_CLASS (sym
) == C_EXT
)
27264 S_SET_STORAGE_CLASS (sym
, C_THUMBEXTFUNC
);
27266 as_bad (_("%s: unexpected function type: %d"),
27267 S_GET_NAME (sym
), S_GET_STORAGE_CLASS (sym
));
27269 else switch (S_GET_STORAGE_CLASS (sym
))
27272 S_SET_STORAGE_CLASS (sym
, C_THUMBEXT
);
27275 S_SET_STORAGE_CLASS (sym
, C_THUMBSTAT
);
27278 S_SET_STORAGE_CLASS (sym
, C_THUMBLABEL
);
27286 if (ARM_IS_INTERWORK (sym
))
27287 coffsymbol (symbol_get_bfdsym (sym
))->native
->u
.syment
.n_flags
= 0xFF;
27294 for (sym
= symbol_rootP
; sym
!= NULL
; sym
= symbol_next (sym
))
27296 if (ARM_IS_THUMB (sym
))
27298 elf_symbol_type
* elf_sym
;
27300 elf_sym
= elf_symbol (symbol_get_bfdsym (sym
));
27301 bind
= ELF_ST_BIND (elf_sym
->internal_elf_sym
.st_info
);
27303 if (! bfd_is_arm_special_symbol_name (elf_sym
->symbol
.name
,
27304 BFD_ARM_SPECIAL_SYM_TYPE_ANY
))
27306 /* If it's a .thumb_func, declare it as so,
27307 otherwise tag label as .code 16. */
27308 if (THUMB_IS_FUNC (sym
))
27309 ARM_SET_SYM_BRANCH_TYPE (elf_sym
->internal_elf_sym
.st_target_internal
,
27310 ST_BRANCH_TO_THUMB
);
27311 else if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
27312 elf_sym
->internal_elf_sym
.st_info
=
27313 ELF_ST_INFO (bind
, STT_ARM_16BIT
);
27318 /* Remove any overlapping mapping symbols generated by alignment frags. */
27319 bfd_map_over_sections (stdoutput
, check_mapping_symbols
, (char *) 0);
27320 /* Now do generic ELF adjustments. */
27321 elf_adjust_symtab ();
27325 /* MD interface: Initialization. */
27328 set_constant_flonums (void)
27332 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
27333 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
27337 /* Auto-select Thumb mode if it's the only available instruction set for the
27338 given architecture. */
27341 autoselect_thumb_from_cpu_variant (void)
27343 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
27344 opcode_select (16);
27353 if ( (arm_ops_hsh
= hash_new ()) == NULL
27354 || (arm_cond_hsh
= hash_new ()) == NULL
27355 || (arm_vcond_hsh
= hash_new ()) == NULL
27356 || (arm_shift_hsh
= hash_new ()) == NULL
27357 || (arm_psr_hsh
= hash_new ()) == NULL
27358 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
27359 || (arm_reg_hsh
= hash_new ()) == NULL
27360 || (arm_reloc_hsh
= hash_new ()) == NULL
27361 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
27362 as_fatal (_("virtual memory exhausted"));
27364 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
27365 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
27366 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
27367 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
27368 for (i
= 0; i
< sizeof (vconds
) / sizeof (struct asm_cond
); i
++)
27369 hash_insert (arm_vcond_hsh
, vconds
[i
].template_name
, (void *) (vconds
+ i
));
27370 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
27371 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
27372 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
27373 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
27374 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
27375 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
27376 (void *) (v7m_psrs
+ i
));
27377 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
27378 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
27380 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
27382 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
27383 (void *) (barrier_opt_names
+ i
));
27385 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
27387 struct reloc_entry
* entry
= reloc_names
+ i
;
27389 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
27390 /* This makes encode_branch() use the EABI versions of this relocation. */
27391 entry
->reloc
= BFD_RELOC_UNUSED
;
27393 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
27397 set_constant_flonums ();
27399 /* Set the cpu variant based on the command-line options. We prefer
27400 -mcpu= over -march= if both are set (as for GCC); and we prefer
27401 -mfpu= over any other way of setting the floating point unit.
27402 Use of legacy options with new options are faulted. */
27405 if (mcpu_cpu_opt
|| march_cpu_opt
)
27406 as_bad (_("use of old and new-style options to set CPU type"));
27408 selected_arch
= *legacy_cpu
;
27410 else if (mcpu_cpu_opt
)
27412 selected_arch
= *mcpu_cpu_opt
;
27413 selected_ext
= *mcpu_ext_opt
;
27415 else if (march_cpu_opt
)
27417 selected_arch
= *march_cpu_opt
;
27418 selected_ext
= *march_ext_opt
;
27420 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
27425 as_bad (_("use of old and new-style options to set FPU type"));
27427 selected_fpu
= *legacy_fpu
;
27430 selected_fpu
= *mfpu_opt
;
27433 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
27434 || defined (TE_NetBSD) || defined (TE_VXWORKS))
27435 /* Some environments specify a default FPU. If they don't, infer it
27436 from the processor. */
27438 selected_fpu
= *mcpu_fpu_opt
;
27439 else if (march_fpu_opt
)
27440 selected_fpu
= *march_fpu_opt
;
27442 selected_fpu
= fpu_default
;
27446 if (ARM_FEATURE_ZERO (selected_fpu
))
27448 if (!no_cpu_selected ())
27449 selected_fpu
= fpu_default
;
27451 selected_fpu
= fpu_arch_fpa
;
27455 if (ARM_FEATURE_ZERO (selected_arch
))
27457 selected_arch
= cpu_default
;
27458 selected_cpu
= selected_arch
;
27460 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27462 /* Autodection of feature mode: allow all features in cpu_variant but leave
27463 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
27464 after all instruction have been processed and we can decide what CPU
27465 should be selected. */
27466 if (ARM_FEATURE_ZERO (selected_arch
))
27467 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
27469 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27472 autoselect_thumb_from_cpu_variant ();
27474 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
27476 #if defined OBJ_COFF || defined OBJ_ELF
27478 unsigned int flags
= 0;
27480 #if defined OBJ_ELF
27481 flags
= meabi_flags
;
27483 switch (meabi_flags
)
27485 case EF_ARM_EABI_UNKNOWN
:
27487 /* Set the flags in the private structure. */
27488 if (uses_apcs_26
) flags
|= F_APCS26
;
27489 if (support_interwork
) flags
|= F_INTERWORK
;
27490 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
27491 if (pic_code
) flags
|= F_PIC
;
27492 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
27493 flags
|= F_SOFT_FLOAT
;
27495 switch (mfloat_abi_opt
)
27497 case ARM_FLOAT_ABI_SOFT
:
27498 case ARM_FLOAT_ABI_SOFTFP
:
27499 flags
|= F_SOFT_FLOAT
;
27502 case ARM_FLOAT_ABI_HARD
:
27503 if (flags
& F_SOFT_FLOAT
)
27504 as_bad (_("hard-float conflicts with specified fpu"));
27508 /* Using pure-endian doubles (even if soft-float). */
27509 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
27510 flags
|= F_VFP_FLOAT
;
27512 #if defined OBJ_ELF
27513 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
27514 flags
|= EF_ARM_MAVERICK_FLOAT
;
27517 case EF_ARM_EABI_VER4
:
27518 case EF_ARM_EABI_VER5
:
27519 /* No additional flags to set. */
27526 bfd_set_private_flags (stdoutput
, flags
);
27528 /* We have run out flags in the COFF header to encode the
27529 status of ATPCS support, so instead we create a dummy,
27530 empty, debug section called .arm.atpcs. */
27535 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
27539 bfd_set_section_flags
27540 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
27541 bfd_set_section_size (stdoutput
, sec
, 0);
27542 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
27548 /* Record the CPU type as well. */
27549 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
27550 mach
= bfd_mach_arm_iWMMXt2
;
27551 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
27552 mach
= bfd_mach_arm_iWMMXt
;
27553 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
27554 mach
= bfd_mach_arm_XScale
;
27555 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
27556 mach
= bfd_mach_arm_ep9312
;
27557 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
27558 mach
= bfd_mach_arm_5TE
;
27559 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
27561 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27562 mach
= bfd_mach_arm_5T
;
27564 mach
= bfd_mach_arm_5
;
27566 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
27568 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27569 mach
= bfd_mach_arm_4T
;
27571 mach
= bfd_mach_arm_4
;
27573 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
27574 mach
= bfd_mach_arm_3M
;
27575 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
27576 mach
= bfd_mach_arm_3
;
27577 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
27578 mach
= bfd_mach_arm_2a
;
27579 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
27580 mach
= bfd_mach_arm_2
;
27582 mach
= bfd_mach_arm_unknown
;
27584 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
27587 /* Command line processing. */
27590 Invocation line includes a switch not recognized by the base assembler.
27591 See if it's a processor-specific option.
27593 This routine is somewhat complicated by the need for backwards
27594 compatibility (since older releases of gcc can't be changed).
27595 The new options try to make the interface as compatible as
27598 New options (supported) are:
27600 -mcpu=<cpu name> Assemble for selected processor
27601 -march=<architecture name> Assemble for selected architecture
27602 -mfpu=<fpu architecture> Assemble for selected FPU.
27603 -EB/-mbig-endian Big-endian
27604 -EL/-mlittle-endian Little-endian
27605 -k Generate PIC code
27606 -mthumb Start in Thumb mode
27607 -mthumb-interwork Code supports ARM/Thumb interworking
27609 -m[no-]warn-deprecated Warn about deprecated features
27610 -m[no-]warn-syms Warn when symbols match instructions
27612 For now we will also provide support for:
27614 -mapcs-32 32-bit Program counter
27615 -mapcs-26 26-bit Program counter
27616 -macps-float Floats passed in FP registers
27617 -mapcs-reentrant Reentrant code
27619 (sometime these will probably be replaced with -mapcs=<list of options>
27620 and -matpcs=<list of options>)
27622 The remaining options are only supported for back-wards compatibility.
27623 Cpu variants, the arm part is optional:
27624 -m[arm]1 Currently not supported.
27625 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27626 -m[arm]3 Arm 3 processor
27627 -m[arm]6[xx], Arm 6 processors
27628 -m[arm]7[xx][t][[d]m] Arm 7 processors
27629 -m[arm]8[10] Arm 8 processors
27630 -m[arm]9[20][tdmi] Arm 9 processors
27631 -mstrongarm[110[0]] StrongARM processors
27632 -mxscale XScale processors
27633 -m[arm]v[2345[t[e]]] Arm architectures
27634 -mall All (except the ARM1)
27636 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27637 -mfpe-old (No float load/store multiples)
27638 -mvfpxd VFP Single precision
27640 -mno-fpu Disable all floating point instructions
27642 The following CPU names are recognized:
27643 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27644 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27645 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27646 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27647 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27648 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27649 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
27653 const char * md_shortopts
= "m:k";
27655 #ifdef ARM_BI_ENDIAN
27656 #define OPTION_EB (OPTION_MD_BASE + 0)
27657 #define OPTION_EL (OPTION_MD_BASE + 1)
27659 #if TARGET_BYTES_BIG_ENDIAN
27660 #define OPTION_EB (OPTION_MD_BASE + 0)
27662 #define OPTION_EL (OPTION_MD_BASE + 1)
27665 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
27666 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
27668 struct option md_longopts
[] =
27671 {"EB", no_argument
, NULL
, OPTION_EB
},
27674 {"EL", no_argument
, NULL
, OPTION_EL
},
27676 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
27678 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
27680 {NULL
, no_argument
, NULL
, 0}
27683 size_t md_longopts_size
= sizeof (md_longopts
);
27685 struct arm_option_table
27687 const char * option
; /* Option name to match. */
27688 const char * help
; /* Help information. */
27689 int * var
; /* Variable to change. */
27690 int value
; /* What to change it to. */
27691 const char * deprecated
; /* If non-null, print this message. */
27694 struct arm_option_table arm_opts
[] =
27696 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
27697 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
27698 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27699 &support_interwork
, 1, NULL
},
27700 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
27701 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
27702 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
27704 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
27705 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
27706 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
27707 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
27710 /* These are recognized by the assembler, but have no affect on code. */
27711 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
27712 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
27714 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
27715 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27716 &warn_on_deprecated
, 0, NULL
},
27717 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
27718 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
27719 {NULL
, NULL
, NULL
, 0, NULL
}
27722 struct arm_legacy_option_table
27724 const char * option
; /* Option name to match. */
27725 const arm_feature_set
** var
; /* Variable to change. */
27726 const arm_feature_set value
; /* What to change it to. */
27727 const char * deprecated
; /* If non-null, print this message. */
27730 const struct arm_legacy_option_table arm_legacy_opts
[] =
27732 /* DON'T add any new processors to this list -- we want the whole list
27733 to go away... Add them to the processors table instead. */
27734 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27735 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27736 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27737 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27738 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27739 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27740 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27741 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27742 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27743 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27744 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27745 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27746 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27747 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27748 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27749 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27750 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27751 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27752 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27753 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27754 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27755 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27756 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27757 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27758 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27759 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27760 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27761 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27762 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27763 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27764 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27765 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27766 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27767 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27768 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27769 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27770 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27771 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27772 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27773 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27774 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27775 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27776 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27777 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27778 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27779 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27780 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27781 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27782 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27783 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27784 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27785 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27786 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27787 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27788 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27789 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27790 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27791 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27792 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27793 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27794 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27795 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27796 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27797 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27798 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27799 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27800 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27801 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27802 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
27803 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
27804 N_("use -mcpu=strongarm110")},
27805 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
27806 N_("use -mcpu=strongarm1100")},
27807 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
27808 N_("use -mcpu=strongarm1110")},
27809 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
27810 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
27811 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
27813 /* Architecture variants -- don't add any more to this list either. */
27814 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27815 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27816 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27817 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27818 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27819 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27820 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27821 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27822 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27823 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27824 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27825 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27826 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27827 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27828 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27829 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27830 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27831 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27833 /* Floating point variants -- don't add any more to this list either. */
27834 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
27835 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
27836 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
27837 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
27838 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
27840 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
27843 struct arm_cpu_option_table
27847 const arm_feature_set value
;
27848 const arm_feature_set ext
;
27849 /* For some CPUs we assume an FPU unless the user explicitly sets
27851 const arm_feature_set default_fpu
;
27852 /* The canonical name of the CPU, or NULL to use NAME converted to upper
27854 const char * canonical_name
;
27857 /* This list should, at a minimum, contain all the cpu names
27858 recognized by GCC. */
27859 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27861 static const struct arm_cpu_option_table arm_cpus
[] =
27863 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
27866 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
27869 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
27872 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
27875 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
27878 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
27881 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
27884 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
27887 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
27890 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
27893 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
27896 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
27899 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
27902 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
27905 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
27908 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
27911 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
27914 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
27917 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
27920 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
27923 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
27926 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
27929 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
27932 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
27935 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
27938 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
27941 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
27944 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
27947 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
27950 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
27953 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
27956 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
27959 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
27962 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
27965 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
27968 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
27971 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
27974 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
27977 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
27980 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
27983 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
27986 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
27989 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
27992 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
27995 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
27998 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
28002 /* For V5 or later processors we default to using VFP; but the user
28003 should really set the FPU type explicitly. */
28004 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
28007 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
28010 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
28013 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
28016 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
28019 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
28022 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
28025 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
28028 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
28031 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
28034 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
28037 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
28040 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
28043 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
28046 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
28049 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
28052 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
28055 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
28058 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
28061 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
28064 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
28067 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
28070 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
28073 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
28076 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
28079 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
28082 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
28085 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
28088 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
28091 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
28094 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
28097 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
28100 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
28103 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
28106 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
28109 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
28112 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
28113 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28115 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
28117 FPU_ARCH_NEON_VFP_V4
),
28118 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
28119 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28120 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
28121 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
28122 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28123 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
28124 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
28126 FPU_ARCH_NEON_VFP_V4
),
28127 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
28129 FPU_ARCH_NEON_VFP_V4
),
28130 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
28132 FPU_ARCH_NEON_VFP_V4
),
28133 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
28134 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28135 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28136 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
28137 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28138 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28139 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
28140 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28141 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28142 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
28143 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28144 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28145 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
28146 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28147 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28148 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
28149 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28150 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28151 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
28152 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28153 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28154 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
28155 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28156 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28157 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
28158 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28159 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28160 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
28161 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28162 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28163 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
28166 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
28168 FPU_ARCH_VFP_V3D16
),
28169 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
28170 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
28172 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
28173 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
28174 FPU_ARCH_VFP_V3D16
),
28175 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
28176 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
28177 FPU_ARCH_VFP_V3D16
),
28178 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
28179 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28180 FPU_ARCH_NEON_VFP_ARMV8
),
28181 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
28182 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28184 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
28187 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
28190 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
28193 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
28196 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
28199 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
28202 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
28205 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
28206 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28207 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28208 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
28209 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28210 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28211 /* ??? XSCALE is really an architecture. */
28212 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
28216 /* ??? iwmmxt is not a processor. */
28217 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
28220 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
28223 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
28228 ARM_CPU_OPT ("ep9312", "ARM920T",
28229 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
28230 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
28232 /* Marvell processors. */
28233 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
28234 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28235 FPU_ARCH_VFP_V3D16
),
28236 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
28237 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28238 FPU_ARCH_NEON_VFP_V4
),
28240 /* APM X-Gene family. */
28241 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
28243 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28244 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
28245 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28246 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28248 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
/* One "+ext" entry accepted after an architecture name: MERGE lists the
   features the extension turns on, CLEAR the features "+noext" turns off.
   NOTE(review): the extraction dropped lines here (embedded numbering jumps
   28252 -> 28256); the name/name_len fields are not visible — verify against
   upstream gas/config/tc-arm.c.  */
28252 struct arm_ext_table
28256 const arm_feature_set merge
;
28257 const arm_feature_set clear
;
/* One -march=<name> entry: the architecture's feature set, its default FPU,
   and an optional table of "+ext" extensions valid for that architecture.
   NOTE(review): name/name_len fields were dropped by the extraction
   (numbering jumps 28260 -> 28264) — confirm against upstream.  */
28260 struct arm_arch_option_table
28264 const arm_feature_set value
;
28265 const arm_feature_set default_fpu
;
28266 const struct arm_ext_table
* ext_table
;
/* Helper initializers for struct arm_ext_table rows; sizeof (E) - 1 is the
   extension-name length excluding the NUL terminator.  */
28269 /* Used to add support for +E and +noE extension. */
28270 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
28271 /* Used to add support for a +E extension. */
28272 #define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
28273 /* Used to add support for a +noE extension. */
28274 #define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
/* ALL_FP: every FP-related feature bit except the FPU endianness bits —
   used as the CLEAR set so "+nofp" removes all floating point.  */
28276 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
28277 ~0 & ~FPU_ENDIAN_PURE)
/* "+ext" names accepted for ARMv5TE-class architectures (only "+fp").
   Terminated by the all-NULL sentinel row, as are all *_ext_table arrays.  */
28279 static const struct arm_ext_table armv5te_ext_table
[] =
28281 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
28282 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for plain ARMv7 (only "+fp" -> VFPv3-D16).  */
28285 static const struct arm_ext_table armv7_ext_table
[] =
28287 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28288 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv7VE: FP variants plus SIMD and its aliases.
   NOTE(review): extraction dropped some lines (numbering gaps, e.g. 28299 ->
   28301); compare with upstream before editing.  */
28291 static const struct arm_ext_table armv7ve_ext_table
[] =
28293 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
28294 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
28295 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
28296 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28297 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
28298 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
28299 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
28301 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
28302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
28304 /* Aliases for +simd. */
28305 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
28307 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28308 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28309 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
28311 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv7-A: FP/SIMD variants plus the core
   "mp" (multiprocessing) and "sec" (security) extensions.  */
28314 static const struct arm_ext_table armv7a_ext_table
[] =
28316 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28317 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
28318 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
28319 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28320 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
28321 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
28322 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
28324 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
28325 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
28327 /* Aliases for +simd. */
28328 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28329 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28331 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
28332 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
28334 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
28335 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
28336 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv7-R: single/double-precision FP variants
   and "idiv" (hardware ARM+Thumb divide).  */
28339 static const struct arm_ext_table armv7r_ext_table
[] =
28341 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
28342 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
28343 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28344 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
28345 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
28346 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28347 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28348 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
28349 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv7E-M (Cortex-M4/M7 class FP options).  */
28352 static const struct arm_ext_table armv7em_ext_table
[] =
28354 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
28355 /* Alias for +fp, used to be known as fpv4-sp-d16. */
28356 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
28357 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
28358 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28359 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
28360 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8-A: crc, simd, crypto, sb, predres;
   "+nofp" removes all FP (ARM_REMOVE with ALL_FP).  */
28363 static const struct arm_ext_table armv8a_ext_table
[] =
28365 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28366 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28367 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28368 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28370 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28371 should use the +simd option to turn on FP. */
28372 ARM_REMOVE ("fp", ALL_FP
),
28373 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28374 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28375 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8.1-A (ARMv8.1 SIMD/crypto variants).  */
28379 static const struct arm_ext_table armv81a_ext_table
[] =
28381 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
28382 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
28383 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28385 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28386 should use the +simd option to turn on FP. */
28387 ARM_REMOVE ("fp", ALL_FP
),
28388 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28389 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28390 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8.2-A (adds fp16, fp16fml, dotprod).  */
28393 static const struct arm_ext_table armv82a_ext_table
[] =
28395 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
28396 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
28397 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
28398 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
28399 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28400 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28402 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28403 should use the +simd option to turn on FP. */
28404 ARM_REMOVE ("fp", ALL_FP
),
28405 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28406 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28407 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8.4-A (dotprod folded into +simd).  */
28410 static const struct arm_ext_table armv84a_ext_table
[] =
28412 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28413 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28414 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28415 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28417 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28418 should use the +simd option to turn on FP. */
28419 ARM_REMOVE ("fp", ALL_FP
),
28420 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28421 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28422 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8.5-A.  No "sb"/"predres" rows here —
   presumably mandatory at 8.5 so they need no explicit option; verify
   against upstream before relying on this.  */
28425 static const struct arm_ext_table armv85a_ext_table
[] =
28427 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28428 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28429 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28430 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28432 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28433 should use the +simd option to turn on FP. */
28434 ARM_REMOVE ("fp", ALL_FP
),
28435 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8-M Mainline: dsp, fp, fp.dp.  */
28438 static const struct arm_ext_table armv8m_main_ext_table
[] =
28440 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28441 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28442 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
28443 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28444 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8.1-M Mainline: dsp, FP variants, and MVE.
   NOTE(review): the extraction dropped the extension-name lines for several
   rows (the bare ARM_FEATURE (...) fragments at 28452/28456/28461 have lost
   their surrounding ARM_ADD/ARM_EXT and name, e.g. "fp", "fp.dp",
   "mve.fp" in upstream) — do not edit without the original file.  */
28447 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
28449 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28450 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28452 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28453 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
28456 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28457 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28458 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE
),
28459 ARM_FEATURE_COPROC (FPU_MVE
| FPU_MVE_FP
)),
28461 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28462 FPU_MVE
| FPU_MVE_FP
| FPU_VFP_V5_SP_D16
|
28463 FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28464 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* "+ext" names accepted for ARMv8-R: crc, simd, crypto, fp.sp; "+nofp"
   removes all FP.  */
28467 static const struct arm_ext_table armv8r_ext_table
[] =
28469 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28470 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28471 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28472 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28473 ARM_REMOVE ("fp", ALL_FP
),
28474 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
28475 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* -march=<name> lookup table mapping architecture names to feature sets,
   default FPU, and (via ARM_ARCH_OPT2) a per-architecture "+ext" table.
   NOTE(review): the extraction dropped lines throughout (numbering gaps,
   and the ext-table argument is missing from a few ARM_ARCH_OPT2 rows,
   e.g. 28532/28534); treat as read-only reference, not editable source.  */
28478 /* This list should, at a minimum, contain all the architecture names
28479 recognized by GCC. */
28480 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
28481 #define ARM_ARCH_OPT2(N, V, DF, ext) \
28482 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28484 static const struct arm_arch_option_table arm_archs
[] =
28486 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
28487 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
28488 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
28489 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28490 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28491 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
28492 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
28493 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
28494 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
28495 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
28496 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
28497 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
28498 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
28499 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
28500 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
28501 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
28502 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
28503 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28504 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28505 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
28506 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
28507 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28508 kept to preserve existing behaviour. */
28509 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28510 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28511 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
28512 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
28513 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
28514 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28515 kept to preserve existing behaviour. */
28516 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28517 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28518 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
28519 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
28520 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
28521 /* The official spelling of the ARMv7 profile variants is the dashed form.
28522 Accept the non-dashed form for compatibility with old toolchains. */
28523 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28524 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
28525 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28526 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28527 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28528 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28529 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28530 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
28531 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
28532 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
28534 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
28536 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
28537 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
28538 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
28539 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
28540 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
28541 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
28542 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
28543 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
28544 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
28545 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
28546 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
28548 #undef ARM_ARCH_OPT
/* Legacy (architecture-independent) "+ext" entry: merge/clear feature sets
   plus up to two architectures the extension is restricted to.
   NOTE(review): name/name_len fields dropped by the extraction
   (numbering jumps 28552 -> 28556).  */
28550 /* ISA extensions in the co-processor and main instruction set space. */
28552 struct arm_option_extension_value_table
28556 const arm_feature_set merge_value
;
28557 const arm_feature_set clear_value
;
28558 /* List of architectures for which an extension is available. ARM_ARCH_NONE
28559 indicates that an extension is available for all architectures while
28560 ARM_ANY marks an empty entry. */
28561 const arm_feature_set allowed_archs
[2];
/* DEPRECATED global "+ext" table, consulted when an architecture has no
   context-sensitive ext_table.  Must stay alphabetically ordered — the
   parser relies on ordering to diagnose out-of-order extensions.
   NOTE(review): the extraction dropped lines in several rows (numbering
   gaps, unbalanced fragments around 28580/28588/28623/28632/28641);
   compare with upstream gas/config/tc-arm.c before editing.  */
28564 /* The following table must be in alphabetical order with a NULL last entry. */
28566 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
28567 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28569 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28570 use the context sensitive approach using arm_ext_table's. */
28571 static const struct arm_option_extension_value_table arm_extensions
[] =
28573 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28574 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28575 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28576 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
28577 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28578 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
28579 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
28581 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28582 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28583 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
28584 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
28585 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28586 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28587 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28589 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28590 | ARM_EXT2_FP16_FML
),
28591 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28592 | ARM_EXT2_FP16_FML
),
28594 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28595 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28596 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28597 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28598 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28599 Thumb divide instruction. Due to this having the same name as the
28600 previous entry, this will be ignored when doing command-line parsing and
28601 only considered by build attribute selection code. */
28602 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28603 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28604 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
28605 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
28606 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
28607 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
28608 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
28609 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
28610 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
28611 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28612 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28613 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28614 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28615 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28616 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28617 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
28618 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
28619 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
28620 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28621 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28622 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28624 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
28625 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
28626 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28627 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
28628 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
28629 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28630 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28631 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28633 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28634 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28635 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
28636 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28637 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
28638 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
28639 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28640 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
28642 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
28643 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28644 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
28645 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
28646 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
/* One -mfpu=<name> entry mapping an FPU name to its feature set.
   NOTE(review): the name field was dropped by the extraction
   (numbering jumps 28651 -> 28654).  */
28650 /* ISA floating-point and Advanced SIMD extensions. */
28651 struct arm_option_fpu_value_table
28654 const arm_feature_set value
;
/* -mfpu=<name> lookup table.  Matched by exact string compare (streq)
   in arm_parse_fpu below; terminated by a NULL-name sentinel.  */
28657 /* This list should, at a minimum, contain all the fpu names
28658 recognized by GCC. */
28659 static const struct arm_option_fpu_value_table arm_fpus
[] =
28661 {"softfpa", FPU_NONE
},
28662 {"fpe", FPU_ARCH_FPE
},
28663 {"fpe2", FPU_ARCH_FPE
},
28664 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
28665 {"fpa", FPU_ARCH_FPA
},
28666 {"fpa10", FPU_ARCH_FPA
},
28667 {"fpa11", FPU_ARCH_FPA
},
28668 {"arm7500fe", FPU_ARCH_FPA
},
28669 {"softvfp", FPU_ARCH_VFP
},
28670 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
28671 {"vfp", FPU_ARCH_VFP_V2
},
28672 {"vfp9", FPU_ARCH_VFP_V2
},
28673 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
28674 {"vfp10", FPU_ARCH_VFP_V2
},
28675 {"vfp10-r0", FPU_ARCH_VFP_V1
},
28676 {"vfpxd", FPU_ARCH_VFP_V1xD
},
28677 {"vfpv2", FPU_ARCH_VFP_V2
},
28678 {"vfpv3", FPU_ARCH_VFP_V3
},
28679 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
28680 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
28681 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
28682 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
28683 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
28684 {"arm1020t", FPU_ARCH_VFP_V1
},
28685 {"arm1020e", FPU_ARCH_VFP_V2
},
28686 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
28687 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
28688 {"maverick", FPU_ARCH_MAVERICK
},
28689 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28690 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28691 {"neon-fp16", FPU_ARCH_NEON_FP16
},
28692 {"vfpv4", FPU_ARCH_VFP_V4
},
28693 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
28694 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
28695 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
28696 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
28697 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
28698 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
28699 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
28700 {"crypto-neon-fp-armv8",
28701 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
28702 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
28703 {"crypto-neon-fp-armv8.1",
28704 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
28705 {NULL
, ARM_ARCH_NONE
}
/* Generic name->integer option entry, plus the -mfloat-abi=<abi> table.
   NOTE(review): the struct's field lines were dropped by the extraction
   (numbering jumps 28708 -> 28714), as was this table's sentinel row.  */
28708 struct arm_option_value_table
28714 static const struct arm_option_value_table arm_float_abis
[] =
28716 {"hard", ARM_FLOAT_ABI_HARD
},
28717 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
28718 {"soft", ARM_FLOAT_ABI_SOFT
},
/* -meabi=<ver> table mapping EABI version names to EF_ARM_EABI_* flags.
   NOTE(review): sentinel row not visible — dropped by the extraction.  */
28723 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
28724 static const struct arm_option_value_table arm_eabis
[] =
28726 {"gnu", EF_ARM_EABI_UNKNOWN
},
28727 {"4", EF_ARM_EABI_VER4
},
28728 {"5", EF_ARM_EABI_VER5
},
/* A long command-line option ("-mcpu=", "-mfpu=", ...) with its help text,
   sub-option decoder callback, and optional deprecation message.  */
28733 struct arm_long_option_table
28735 const char * option
; /* Substring to match. */
28736 const char * help
; /* Help information. */
28737 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
28738 const char * deprecated
; /* If non-null, print this message. */
/* Parse a '+'-separated extension list STR (e.g. "+fp+nosimd") against the
   base feature set *OPT_SET, accumulating enabled/disabled features into
   *EXT_SET.  When EXT_TABLE is non-NULL the architecture's own extension
   table is consulted first; otherwise the legacy global arm_extensions
   table is used.  Errors are reported via as_bad.
   NOTE(review): many lines of this function were dropped by the extraction
   (numbering jumps, missing braces/returns) — the fragments below are a
   partial view only; do not edit without the upstream source.  */
28742 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
28743 arm_feature_set
*ext_set
,
28744 const struct arm_ext_table
*ext_table
)
28746 /* We insist on extensions being specified in alphabetical order, and with
28747 extensions being added before being removed. We achieve this by having
28748 the global ARM_EXTENSIONS table in alphabetical order, and using the
28749 ADDING_VALUE variable to indicate whether we are adding an extension (1)
28750 or removing it (0) and only allowing it to change in the order
28752 const struct arm_option_extension_value_table
* opt
= NULL
;
28753 const arm_feature_set arm_any
= ARM_ANY
;
/* -1 = no direction decided yet; 1 = adding; 0 = removing.  */
28754 int adding_value
= -1;
28756 while (str
!= NULL
&& *str
!= 0)
28763 as_bad (_("invalid architectural extension"));
/* Each iteration handles one "+name" / "+noname" token.  */
28768 ext
= strchr (str
, '+');
28773 len
= strlen (str
);
/* A "no" prefix flips to removal mode (once removal starts, no more adds).  */
28775 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
28777 if (adding_value
!= 0)
28780 opt
= arm_extensions
;
28788 if (adding_value
== -1)
28791 opt
= arm_extensions
;
28793 else if (adding_value
!= 1)
28795 as_bad (_("must specify extensions to add before specifying "
28796 "those to remove"));
28803 as_bad (_("missing architectural extension"));
28807 gas_assert (adding_value
!= -1);
28808 gas_assert (opt
!= NULL
);
/* Context-sensitive path: search the architecture's own ext table first.  */
28810 if (ext_table
!= NULL
)
28812 const struct arm_ext_table
* ext_opt
= ext_table
;
28813 bfd_boolean found
= FALSE
;
28814 for (; ext_opt
->name
!= NULL
; ext_opt
++)
28815 if (ext_opt
->name_len
== len
28816 && strncmp (ext_opt
->name
, str
, len
) == 0)
28820 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
28821 /* TODO: Option not supported. When we remove the
28822 legacy table this case should error out. */
28825 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
28829 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
28830 /* TODO: Option not supported. When we remove the
28831 legacy table this case should error out. */
28833 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
/* Legacy path: linear scan of the global (alphabetical) table.  */
28845 /* Scan over the options table trying to find an exact match. */
28846 for (; opt
->name
!= NULL
; opt
++)
28847 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28849 int i
, nb_allowed_archs
=
28850 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28851 /* Check we can apply the extension to this architecture. */
28852 for (i
= 0; i
< nb_allowed_archs
; i
++)
28855 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
28857 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
28860 if (i
== nb_allowed_archs
)
28862 as_bad (_("extension does not apply to the base architecture"));
28866 /* Add or remove the extension. */
28868 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
28870 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
28872 /* Allowing Thumb division instructions for ARMv7 in autodetection
28873 rely on this break so that duplicate extensions (extensions
28874 with the same name as a previous extension in the list) are not
28875 considered for command-line parsing. */
28879 if (opt
->name
== NULL
)
28881 /* Did we fail to find an extension because it wasn't specified in
28882 alphabetical order, or because it does not exist? */
28884 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28885 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28888 if (opt
->name
== NULL
)
28889 as_bad (_("unknown architectural extension `%s'"), str
);
28891 as_bad (_("architectural extensions must be specified in "
28892 "alphabetical order"));
28898 /* We should skip the extension we've just matched the next time
/* -mcpu=<name>[+ext...] handler: look STR up in arm_cpus, set the
   mcpu_* globals and selected_cpu_name, then hand any "+ext" suffix to
   arm_parse_extension.  NOTE(review): lines were dropped by the extraction
   (numbering gaps, missing braces/returns); partial view only.  */
28910 arm_parse_cpu (const char *str
)
28912 const struct arm_cpu_option_table
*opt
;
/* Split "cpu+ext..." at the first '+', if any.  */
28913 const char *ext
= strchr (str
, '+');
28919 len
= strlen (str
);
28923 as_bad (_("missing cpu name `%s'"), str
);
28927 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
28928 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28930 mcpu_cpu_opt
= &opt
->value
;
28931 if (mcpu_ext_opt
== NULL
)
28932 mcpu_ext_opt
= XNEW (arm_feature_set
);
28933 *mcpu_ext_opt
= opt
->ext
;
28934 mcpu_fpu_opt
= &opt
->default_fpu
;
/* Prefer the table's canonical display name; otherwise upper-case the
   matched name into selected_cpu_name (truncated to the buffer).  */
28935 if (opt
->canonical_name
)
28937 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
28938 strcpy (selected_cpu_name
, opt
->canonical_name
);
28944 if (len
>= sizeof selected_cpu_name
)
28945 len
= (sizeof selected_cpu_name
) - 1;
28947 for (i
= 0; i
< len
; i
++)
28948 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28949 selected_cpu_name
[i
] = 0;
28953 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
28958 as_bad (_("unknown cpu `%s'"), str
);
/* -march=<name>[+ext...] handler: look STR up in arm_archs, set the
   march_* globals, then pass any "+ext" suffix to arm_parse_extension
   (with the architecture's own ext_table — argument line dropped by the
   extraction after 28991).  Partial view; numbering gaps indicate
   missing lines.  */
28963 arm_parse_arch (const char *str
)
28965 const struct arm_arch_option_table
*opt
;
28966 const char *ext
= strchr (str
, '+');
28972 len
= strlen (str
);
28976 as_bad (_("missing architecture name `%s'"), str
);
28980 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
28981 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28983 march_cpu_opt
= &opt
->value
;
28984 if (march_ext_opt
== NULL
)
28985 march_ext_opt
= XNEW (arm_feature_set
);
28986 *march_ext_opt
= arm_arch_none
;
28987 march_fpu_opt
= &opt
->default_fpu
;
28988 strcpy (selected_cpu_name
, opt
->name
);
28991 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
28997 as_bad (_("unknown architecture `%s'\n"), str
);
/* -mfpu=<name> handler: exact-match STR against arm_fpus and set
   mfpu_opt; report an error on an unknown name.  (Return statements not
   visible — dropped by the extraction.)  */
29002 arm_parse_fpu (const char * str
)
29004 const struct arm_option_fpu_value_table
* opt
;
29006 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
29007 if (streq (opt
->name
, str
))
29009 mfpu_opt
= &opt
->value
;
29013 as_bad (_("unknown floating point format `%s'\n"), str
);
/* -mfloat-abi=<abi> handler: exact-match STR against arm_float_abis and
   set mfloat_abi_opt; error on an unknown ABI name.  */
29018 arm_parse_float_abi (const char * str
)
29020 const struct arm_option_value_table
* opt
;
29022 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
29023 if (streq (opt
->name
, str
))
29025 mfloat_abi_opt
= opt
->value
;
29029 as_bad (_("unknown floating point abi `%s'\n"), str
);
/* -meabi=<ver> handler: exact-match STR against arm_eabis and set
   meabi_flags; error on an unknown EABI version.  */
29035 arm_parse_eabi (const char * str
)
29037 const struct arm_option_value_table
*opt
;
29039 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
29040 if (streq (opt
->name
, str
))
29042 meabi_flags
= opt
->value
;
29045 as_bad (_("unknown EABI `%s'\n"), str
);
/* -mimplicit-it=<mode> handler: map "arm"/"thumb"/"always"/"never" onto
   the corresponding IMPLICIT_IT_MODE_* value; error otherwise.  RET is
   the success flag (FALSE path not visible in this extraction).  */
29051 arm_parse_it_mode (const char * str
)
29053 bfd_boolean ret
= TRUE
;
29055 if (streq ("arm", str
))
29056 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
29057 else if (streq ("thumb", str
))
29058 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
29059 else if (streq ("always", str
))
29060 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
29061 else if (streq ("never", str
))
29062 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
29065 as_bad (_("unknown implicit IT mode `%s', should be "\
29066 "arm, thumb, always, or never."), str
);
29074 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
29076 codecomposer_syntax
= TRUE
;
29077 arm_comment_chars
[0] = ';';
29078 arm_line_separator_chars
[0] = 0;
29082 struct arm_long_option_table arm_long_opts
[] =
29084 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
29085 arm_parse_cpu
, NULL
},
29086 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
29087 arm_parse_arch
, NULL
},
29088 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
29089 arm_parse_fpu
, NULL
},
29090 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
29091 arm_parse_float_abi
, NULL
},
29093 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
29094 arm_parse_eabi
, NULL
},
29096 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
29097 arm_parse_it_mode
, NULL
},
29098 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
29099 arm_ccs_mode
, NULL
},
29100 {NULL
, NULL
, 0, NULL
}
29104 md_parse_option (int c
, const char * arg
)
29106 struct arm_option_table
*opt
;
29107 const struct arm_legacy_option_table
*fopt
;
29108 struct arm_long_option_table
*lopt
;
29114 target_big_endian
= 1;
29120 target_big_endian
= 0;
29124 case OPTION_FIX_V4BX
:
29132 #endif /* OBJ_ELF */
29135 /* Listing option. Just ignore these, we don't support additional
29140 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
29142 if (c
== opt
->option
[0]
29143 && ((arg
== NULL
&& opt
->option
[1] == 0)
29144 || streq (arg
, opt
->option
+ 1)))
29146 /* If the option is deprecated, tell the user. */
29147 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
29148 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
29149 arg
? arg
: "", _(opt
->deprecated
));
29151 if (opt
->var
!= NULL
)
29152 *opt
->var
= opt
->value
;
29158 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
29160 if (c
== fopt
->option
[0]
29161 && ((arg
== NULL
&& fopt
->option
[1] == 0)
29162 || streq (arg
, fopt
->option
+ 1)))
29164 /* If the option is deprecated, tell the user. */
29165 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
29166 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
29167 arg
? arg
: "", _(fopt
->deprecated
));
29169 if (fopt
->var
!= NULL
)
29170 *fopt
->var
= &fopt
->value
;
29176 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
29178 /* These options are expected to have an argument. */
29179 if (c
== lopt
->option
[0]
29181 && strncmp (arg
, lopt
->option
+ 1,
29182 strlen (lopt
->option
+ 1)) == 0)
29184 /* If the option is deprecated, tell the user. */
29185 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
29186 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
29187 _(lopt
->deprecated
));
29189 /* Call the sup-option parser. */
29190 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
29201 md_show_usage (FILE * fp
)
29203 struct arm_option_table
*opt
;
29204 struct arm_long_option_table
*lopt
;
29206 fprintf (fp
, _(" ARM-specific assembler options:\n"));
29208 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
29209 if (opt
->help
!= NULL
)
29210 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
29212 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
29213 if (lopt
->help
!= NULL
)
29214 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
29218 -EB assemble code for a big-endian cpu\n"));
29223 -EL assemble code for a little-endian cpu\n"));
29227 --fix-v4bx Allow BX in ARMv4 code\n"));
29231 --fdpic generate an FDPIC object file\n"));
29232 #endif /* OBJ_ELF */
29240 arm_feature_set flags
;
29241 } cpu_arch_ver_table
;
29243 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
29244 chronologically for architectures, with an exception for ARMv6-M and
29245 ARMv6S-M due to legacy reasons. No new architecture should have a
29246 special case. This allows for build attribute selection results to be
29247 stable when new architectures are added. */
29248 static const cpu_arch_ver_table cpu_arch_ver
[] =
29250 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
29251 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
29252 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
29253 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
29254 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
29255 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
29256 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
29257 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
29258 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
29259 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
29260 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
29261 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
29262 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
29263 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
29264 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
29265 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
29266 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
29267 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
29268 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
29269 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
29270 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
29271 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
29272 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
29273 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
29275 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
29276 always selected build attributes to match those of ARMv6-M
29277 (resp. ARMv6S-M). However, due to these architectures being a strict
29278 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
29279 would be selected when fully respecting chronology of architectures.
29280 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
29281 move them before ARMv7 architectures. */
29282 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
29283 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
29285 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
29286 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
29287 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
29288 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
29289 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
29290 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
29291 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
29292 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
29293 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
29294 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
29295 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
29296 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
29297 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
29298 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
29299 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
29300 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
29301 {-1, ARM_ARCH_NONE
}
29304 /* Set an attribute if it has not already been set by the user. */
29307 aeabi_set_attribute_int (int tag
, int value
)
29310 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
29311 || !attributes_set_explicitly
[tag
])
29312 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
29316 aeabi_set_attribute_string (int tag
, const char *value
)
29319 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
29320 || !attributes_set_explicitly
[tag
])
29321 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
29324 /* Return whether features in the *NEEDED feature set are available via
29325 extensions for the architecture whose feature set is *ARCH_FSET. */
29328 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
29329 const arm_feature_set
*needed
)
29331 int i
, nb_allowed_archs
;
29332 arm_feature_set ext_fset
;
29333 const struct arm_option_extension_value_table
*opt
;
29335 ext_fset
= arm_arch_none
;
29336 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29338 /* Extension does not provide any feature we need. */
29339 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
29343 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
29344 for (i
= 0; i
< nb_allowed_archs
; i
++)
29347 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
29350 /* Extension is available, add it. */
29351 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
29352 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
29356 /* Can we enable all features in *needed? */
29357 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
29360 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
29361 a given architecture feature set *ARCH_EXT_FSET including extension feature
29362 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
29363 - if true, check for an exact match of the architecture modulo extensions;
29364 - otherwise, select build attribute value of the first superset
29365 architecture released so that results remains stable when new architectures
29367 For -march/-mcpu=all the build attribute value of the most featureful
29368 architecture is returned. Tag_CPU_arch_profile result is returned in
29372 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
29373 const arm_feature_set
*ext_fset
,
29374 char *profile
, int exact_match
)
29376 arm_feature_set arch_fset
;
29377 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
29379 /* Select most featureful architecture with all its extensions if building
29380 for -march=all as the feature sets used to set build attributes. */
29381 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
29383 /* Force revisiting of decision for each new architecture. */
29384 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29386 return TAG_CPU_ARCH_V8
;
29389 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
29391 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
29393 arm_feature_set known_arch_fset
;
29395 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
29398 /* Base architecture match user-specified architecture and
29399 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
29400 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
29405 /* Base architecture match user-specified architecture only
29406 (eg. ARMv6-M in the same case as above). Record it in case we
29407 find a match with above condition. */
29408 else if (p_ver_ret
== NULL
29409 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
29415 /* Architecture has all features wanted. */
29416 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
29418 arm_feature_set added_fset
;
29420 /* Compute features added by this architecture over the one
29421 recorded in p_ver_ret. */
29422 if (p_ver_ret
!= NULL
)
29423 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
29425 /* First architecture that match incl. with extensions, or the
29426 only difference in features over the recorded match is
29427 features that were optional and are now mandatory. */
29428 if (p_ver_ret
== NULL
29429 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
29435 else if (p_ver_ret
== NULL
)
29437 arm_feature_set needed_ext_fset
;
29439 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
29441 /* Architecture has all features needed when using some
29442 extensions. Record it and continue searching in case there
29443 exist an architecture providing all needed features without
29444 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
29446 if (have_ext_for_needed_feat_p (&known_arch_fset
,
29453 if (p_ver_ret
== NULL
)
29457 /* Tag_CPU_arch_profile. */
29458 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
29459 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
29460 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
29461 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
29463 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
29465 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
29469 return p_ver_ret
->val
;
29472 /* Set the public EABI object attributes. */
29475 aeabi_set_public_attributes (void)
29477 char profile
= '\0';
29480 int fp16_optional
= 0;
29481 int skip_exact_match
= 0;
29482 arm_feature_set flags
, flags_arch
, flags_ext
;
29484 /* Autodetection mode, choose the architecture based the instructions
29486 if (no_cpu_selected ())
29488 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
29490 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
29491 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
29493 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
29494 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
29496 /* Code run during relaxation relies on selected_cpu being set. */
29497 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29498 flags_ext
= arm_arch_none
;
29499 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
29500 selected_ext
= flags_ext
;
29501 selected_cpu
= flags
;
29503 /* Otherwise, choose the architecture based on the capabilities of the
29507 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
29508 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
29509 flags_ext
= selected_ext
;
29510 flags
= selected_cpu
;
29512 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
29514 /* Allow the user to override the reported architecture. */
29515 if (!ARM_FEATURE_ZERO (selected_object_arch
))
29517 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
29518 flags_ext
= arm_arch_none
;
29521 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
29523 /* When this function is run again after relaxation has happened there is no
29524 way to determine whether an architecture or CPU was specified by the user:
29525 - selected_cpu is set above for relaxation to work;
29526 - march_cpu_opt is not set if only -mcpu or .cpu is used;
29527 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
29528 Therefore, if not in -march=all case we first try an exact match and fall
29529 back to autodetection. */
29530 if (!skip_exact_match
)
29531 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
29533 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
29535 as_bad (_("no architecture contains all the instructions used\n"));
29537 /* Tag_CPU_name. */
29538 if (selected_cpu_name
[0])
29542 q
= selected_cpu_name
;
29543 if (strncmp (q
, "armv", 4) == 0)
29548 for (i
= 0; q
[i
]; i
++)
29549 q
[i
] = TOUPPER (q
[i
]);
29551 aeabi_set_attribute_string (Tag_CPU_name
, q
);
29554 /* Tag_CPU_arch. */
29555 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
29557 /* Tag_CPU_arch_profile. */
29558 if (profile
!= '\0')
29559 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
29561 /* Tag_DSP_extension. */
29562 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
29563 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
29565 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29566 /* Tag_ARM_ISA_use. */
29567 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
29568 || ARM_FEATURE_ZERO (flags_arch
))
29569 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
29571 /* Tag_THUMB_ISA_use. */
29572 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
29573 || ARM_FEATURE_ZERO (flags_arch
))
29577 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29578 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
29580 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
29584 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
29587 /* Tag_VFP_arch. */
29588 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
29589 aeabi_set_attribute_int (Tag_VFP_arch
,
29590 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29592 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
29593 aeabi_set_attribute_int (Tag_VFP_arch
,
29594 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29596 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
29599 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
29601 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
29603 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
29606 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
29607 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
29608 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
29609 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
29610 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
29612 /* Tag_ABI_HardFP_use. */
29613 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
29614 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
29615 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
29617 /* Tag_WMMX_arch. */
29618 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
29619 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
29620 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
29621 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
29623 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
29624 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
29625 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
29626 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
29627 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
29628 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
29630 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
29632 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
29636 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
29641 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
29642 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
29643 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
29644 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
29646 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
29647 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
29648 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
29652 We set Tag_DIV_use to two when integer divide instructions have been used
29653 in ARM state, or when Thumb integer divide instructions have been used,
29654 but we have no architecture profile set, nor have we any ARM instructions.
29656 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
29657 by the base architecture.
29659 For new architectures we will have to check these tests. */
29660 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29661 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29662 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
29663 aeabi_set_attribute_int (Tag_DIV_use
, 0);
29664 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
29665 || (profile
== '\0'
29666 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
29667 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
29668 aeabi_set_attribute_int (Tag_DIV_use
, 2);
29670 /* Tag_MP_extension_use. */
29671 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
29672 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
29674 /* Tag Virtualization_use. */
29675 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
29677 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
29680 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
29683 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29684 finished and free extension feature bits which will not be used anymore. */
29687 arm_md_post_relax (void)
29689 aeabi_set_public_attributes ();
29690 XDELETE (mcpu_ext_opt
);
29691 mcpu_ext_opt
= NULL
;
29692 XDELETE (march_ext_opt
);
29693 march_ext_opt
= NULL
;
29696 /* Add the default contents for the .ARM.attributes section. */
29701 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
29704 aeabi_set_public_attributes ();
29706 #endif /* OBJ_ELF */
29708 /* Parse a .cpu directive. */
29711 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
29713 const struct arm_cpu_option_table
*opt
;
29717 name
= input_line_pointer
;
29718 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29719 input_line_pointer
++;
29720 saved_char
= *input_line_pointer
;
29721 *input_line_pointer
= 0;
29723 /* Skip the first "all" entry. */
29724 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
29725 if (streq (opt
->name
, name
))
29727 selected_arch
= opt
->value
;
29728 selected_ext
= opt
->ext
;
29729 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29730 if (opt
->canonical_name
)
29731 strcpy (selected_cpu_name
, opt
->canonical_name
);
29735 for (i
= 0; opt
->name
[i
]; i
++)
29736 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
29738 selected_cpu_name
[i
] = 0;
29740 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29742 *input_line_pointer
= saved_char
;
29743 demand_empty_rest_of_line ();
29746 as_bad (_("unknown cpu `%s'"), name
);
29747 *input_line_pointer
= saved_char
;
29748 ignore_rest_of_line ();
29751 /* Parse a .arch directive. */
29754 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
29756 const struct arm_arch_option_table
*opt
;
29760 name
= input_line_pointer
;
29761 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29762 input_line_pointer
++;
29763 saved_char
= *input_line_pointer
;
29764 *input_line_pointer
= 0;
29766 /* Skip the first "all" entry. */
29767 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29768 if (streq (opt
->name
, name
))
29770 selected_arch
= opt
->value
;
29771 selected_ext
= arm_arch_none
;
29772 selected_cpu
= selected_arch
;
29773 strcpy (selected_cpu_name
, opt
->name
);
29774 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29775 *input_line_pointer
= saved_char
;
29776 demand_empty_rest_of_line ();
29780 as_bad (_("unknown architecture `%s'\n"), name
);
29781 *input_line_pointer
= saved_char
;
29782 ignore_rest_of_line ();
29785 /* Parse a .object_arch directive. */
29788 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
29790 const struct arm_arch_option_table
*opt
;
29794 name
= input_line_pointer
;
29795 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29796 input_line_pointer
++;
29797 saved_char
= *input_line_pointer
;
29798 *input_line_pointer
= 0;
29800 /* Skip the first "all" entry. */
29801 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29802 if (streq (opt
->name
, name
))
29804 selected_object_arch
= opt
->value
;
29805 *input_line_pointer
= saved_char
;
29806 demand_empty_rest_of_line ();
29810 as_bad (_("unknown architecture `%s'\n"), name
);
29811 *input_line_pointer
= saved_char
;
29812 ignore_rest_of_line ();
29815 /* Parse a .arch_extension directive. */
29818 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
29820 const struct arm_option_extension_value_table
*opt
;
29823 int adding_value
= 1;
29825 name
= input_line_pointer
;
29826 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29827 input_line_pointer
++;
29828 saved_char
= *input_line_pointer
;
29829 *input_line_pointer
= 0;
29831 if (strlen (name
) >= 2
29832 && strncmp (name
, "no", 2) == 0)
29838 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29839 if (streq (opt
->name
, name
))
29841 int i
, nb_allowed_archs
=
29842 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
29843 for (i
= 0; i
< nb_allowed_archs
; i
++)
29846 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
29848 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
29852 if (i
== nb_allowed_archs
)
29854 as_bad (_("architectural extension `%s' is not allowed for the "
29855 "current base architecture"), name
);
29860 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
29863 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
29865 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29866 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29867 *input_line_pointer
= saved_char
;
29868 demand_empty_rest_of_line ();
29869 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29870 on this return so that duplicate extensions (extensions with the
29871 same name as a previous extension in the list) are not considered
29872 for command-line parsing. */
29876 if (opt
->name
== NULL
)
29877 as_bad (_("unknown architecture extension `%s'\n"), name
);
29879 *input_line_pointer
= saved_char
;
29880 ignore_rest_of_line ();
29883 /* Parse a .fpu directive. */
29886 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
29888 const struct arm_option_fpu_value_table
*opt
;
29892 name
= input_line_pointer
;
29893 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29894 input_line_pointer
++;
29895 saved_char
= *input_line_pointer
;
29896 *input_line_pointer
= 0;
29898 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
29899 if (streq (opt
->name
, name
))
29901 selected_fpu
= opt
->value
;
29902 #ifndef CPU_DEFAULT
29903 if (no_cpu_selected ())
29904 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
29907 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29908 *input_line_pointer
= saved_char
;
29909 demand_empty_rest_of_line ();
29913 as_bad (_("unknown floating point format `%s'\n"), name
);
29914 *input_line_pointer
= saved_char
;
29915 ignore_rest_of_line ();
29918 /* Copy symbol information. */
29921 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
29923 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
29927 /* Given a symbolic attribute NAME, return the proper integer value.
29928 Returns -1 if the attribute is not known. */
29931 arm_convert_symbolic_attribute (const char *name
)
29933 static const struct
29938 attribute_table
[] =
29940 /* When you modify this table you should
29941 also modify the list in doc/c-arm.texi. */
29942 #define T(tag) {#tag, tag}
29943 T (Tag_CPU_raw_name
),
29946 T (Tag_CPU_arch_profile
),
29947 T (Tag_ARM_ISA_use
),
29948 T (Tag_THUMB_ISA_use
),
29952 T (Tag_Advanced_SIMD_arch
),
29953 T (Tag_PCS_config
),
29954 T (Tag_ABI_PCS_R9_use
),
29955 T (Tag_ABI_PCS_RW_data
),
29956 T (Tag_ABI_PCS_RO_data
),
29957 T (Tag_ABI_PCS_GOT_use
),
29958 T (Tag_ABI_PCS_wchar_t
),
29959 T (Tag_ABI_FP_rounding
),
29960 T (Tag_ABI_FP_denormal
),
29961 T (Tag_ABI_FP_exceptions
),
29962 T (Tag_ABI_FP_user_exceptions
),
29963 T (Tag_ABI_FP_number_model
),
29964 T (Tag_ABI_align_needed
),
29965 T (Tag_ABI_align8_needed
),
29966 T (Tag_ABI_align_preserved
),
29967 T (Tag_ABI_align8_preserved
),
29968 T (Tag_ABI_enum_size
),
29969 T (Tag_ABI_HardFP_use
),
29970 T (Tag_ABI_VFP_args
),
29971 T (Tag_ABI_WMMX_args
),
29972 T (Tag_ABI_optimization_goals
),
29973 T (Tag_ABI_FP_optimization_goals
),
29974 T (Tag_compatibility
),
29975 T (Tag_CPU_unaligned_access
),
29976 T (Tag_FP_HP_extension
),
29977 T (Tag_VFP_HP_extension
),
29978 T (Tag_ABI_FP_16bit_format
),
29979 T (Tag_MPextension_use
),
29981 T (Tag_nodefaults
),
29982 T (Tag_also_compatible_with
),
29983 T (Tag_conformance
),
29985 T (Tag_Virtualization_use
),
29986 T (Tag_DSP_extension
),
29988 /* We deliberately do not include Tag_MPextension_use_legacy. */
29996 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
29997 if (streq (name
, attribute_table
[i
].name
))
29998 return attribute_table
[i
].tag
;
30003 /* Apply sym value for relocations only in the case that they are for
30004 local symbols in the same segment as the fixup and you have the
30005 respective architectural feature for blx and simple switches. */
30008 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
30011 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
30012 /* PR 17444: If the local symbol is in a different section then a reloc
30013 will always be generated for it, so applying the symbol value now
30014 will result in a double offset being stored in the relocation. */
30015 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
30016 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
30018 switch (fixP
->fx_r_type
)
30020 case BFD_RELOC_ARM_PCREL_BLX
:
30021 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
30022 if (ARM_IS_FUNC (fixP
->fx_addsy
))
30026 case BFD_RELOC_ARM_PCREL_CALL
:
30027 case BFD_RELOC_THUMB_PCREL_BLX
:
30028 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
30039 #endif /* OBJ_ELF */