1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 #define streq(a, b) (strcmp (a, b) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant
;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used
;
136 static arm_feature_set thumb_arch_used
;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26
= FALSE
;
140 static int atpcs
= FALSE
;
141 static int support_interwork
= FALSE
;
142 static int uses_apcs_float
= FALSE
;
143 static int pic_code
= FALSE
;
144 static int fix_v4bx
= FALSE
;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated
= TRUE
;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax
= FALSE
;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set
*legacy_cpu
= NULL
;
158 static const arm_feature_set
*legacy_fpu
= NULL
;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set
*mcpu_cpu_opt
= NULL
;
162 static arm_feature_set
*mcpu_ext_opt
= NULL
;
163 static const arm_feature_set
*mcpu_fpu_opt
= NULL
;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set
*march_cpu_opt
= NULL
;
167 static arm_feature_set
*march_ext_opt
= NULL
;
168 static const arm_feature_set
*march_fpu_opt
= NULL
;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set
*mfpu_opt
= NULL
;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default
= FPU_DEFAULT
;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V1
;
176 static const arm_feature_set fpu_arch_vfp_v2
= FPU_ARCH_VFP_V2
;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED
= FPU_ARCH_VFP_V3
;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED
= FPU_ARCH_NEON_V1
;
179 static const arm_feature_set fpu_arch_fpa
= FPU_ARCH_FPA
;
180 static const arm_feature_set fpu_any_hard
= FPU_ANY_HARD
;
182 static const arm_feature_set fpu_arch_maverick
= FPU_ARCH_MAVERICK
;
184 static const arm_feature_set fpu_endian_pure
= FPU_ARCH_ENDIAN_PURE
;
187 static const arm_feature_set cpu_default
= CPU_DEFAULT
;
190 static const arm_feature_set arm_ext_v1
= ARM_FEATURE_CORE_LOW (ARM_EXT_V1
);
191 static const arm_feature_set arm_ext_v2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2
);
192 static const arm_feature_set arm_ext_v2s
= ARM_FEATURE_CORE_LOW (ARM_EXT_V2S
);
193 static const arm_feature_set arm_ext_v3
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3
);
194 static const arm_feature_set arm_ext_v3m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V3M
);
195 static const arm_feature_set arm_ext_v4
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4
);
196 static const arm_feature_set arm_ext_v4t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
);
197 static const arm_feature_set arm_ext_v5
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5
);
198 static const arm_feature_set arm_ext_v4t_5
=
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T
| ARM_EXT_V5
);
200 static const arm_feature_set arm_ext_v5t
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5T
);
201 static const arm_feature_set arm_ext_v5e
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
);
202 static const arm_feature_set arm_ext_v5exp
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
);
203 static const arm_feature_set arm_ext_v5j
= ARM_FEATURE_CORE_LOW (ARM_EXT_V5J
);
204 static const arm_feature_set arm_ext_v6
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6
);
205 static const arm_feature_set arm_ext_v6k
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
);
206 static const arm_feature_set arm_ext_v6t2
= ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2
);
207 /* Only for compatibility of hint instructions. */
208 static const arm_feature_set arm_ext_v6k_v6t2
=
209 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
| ARM_EXT_V6T2
);
210 static const arm_feature_set arm_ext_v6_notm
=
211 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM
);
212 static const arm_feature_set arm_ext_v6_dsp
=
213 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP
);
214 static const arm_feature_set arm_ext_barrier
=
215 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER
);
216 static const arm_feature_set arm_ext_msr
=
217 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR
);
218 static const arm_feature_set arm_ext_div
= ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
);
219 static const arm_feature_set arm_ext_v7
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7
);
220 static const arm_feature_set arm_ext_v7a
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
);
221 static const arm_feature_set arm_ext_v7r
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
);
223 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m
= ARM_FEATURE_CORE_LOW (ARM_EXT_V7M
);
225 static const arm_feature_set arm_ext_v8
= ARM_FEATURE_CORE_LOW (ARM_EXT_V8
);
226 static const arm_feature_set arm_ext_m
=
227 ARM_FEATURE_CORE (ARM_EXT_V6M
| ARM_EXT_V7M
,
228 ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
229 static const arm_feature_set arm_ext_mp
= ARM_FEATURE_CORE_LOW (ARM_EXT_MP
);
230 static const arm_feature_set arm_ext_sec
= ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
);
231 static const arm_feature_set arm_ext_os
= ARM_FEATURE_CORE_LOW (ARM_EXT_OS
);
232 static const arm_feature_set arm_ext_adiv
= ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
);
233 static const arm_feature_set arm_ext_virt
= ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
);
234 static const arm_feature_set arm_ext_pan
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
);
235 static const arm_feature_set arm_ext_v8m
= ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
);
236 static const arm_feature_set arm_ext_v8m_main
=
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN
);
238 static const arm_feature_set arm_ext_v8_1m_main
=
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN
);
240 /* Instructions in ARMv8-M only found in M profile architectures. */
241 static const arm_feature_set arm_ext_v8m_m_only
=
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M
| ARM_EXT2_V8M_MAIN
);
243 static const arm_feature_set arm_ext_v6t2_v8m
=
244 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M
);
245 /* Instructions shared between ARMv8-A and ARMv8-M. */
246 static const arm_feature_set arm_ext_atomics
=
247 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS
);
249 /* DSP instructions Tag_DSP_extension refers to. */
250 static const arm_feature_set arm_ext_dsp
=
251 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E
| ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
);
253 static const arm_feature_set arm_ext_ras
=
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
);
255 /* FP16 instructions. */
256 static const arm_feature_set arm_ext_fp16
=
257 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
);
258 static const arm_feature_set arm_ext_fp16_fml
=
259 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML
);
260 static const arm_feature_set arm_ext_v8_2
=
261 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A
);
262 static const arm_feature_set arm_ext_v8_3
=
263 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A
);
264 static const arm_feature_set arm_ext_sb
=
265 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
);
266 static const arm_feature_set arm_ext_predres
=
267 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
);
269 static const arm_feature_set arm_arch_any
= ARM_ANY
;
271 static const arm_feature_set fpu_any
= FPU_ANY
;
273 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED
= ARM_FEATURE (-1, -1, -1);
274 static const arm_feature_set arm_arch_t2
= ARM_ARCH_THUMB2
;
275 static const arm_feature_set arm_arch_none
= ARM_ARCH_NONE
;
277 static const arm_feature_set arm_cext_iwmmxt2
=
278 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
);
279 static const arm_feature_set arm_cext_iwmmxt
=
280 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
);
281 static const arm_feature_set arm_cext_xscale
=
282 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
);
283 static const arm_feature_set arm_cext_maverick
=
284 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
);
285 static const arm_feature_set fpu_fpa_ext_v1
=
286 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1
);
287 static const arm_feature_set fpu_fpa_ext_v2
=
288 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2
);
289 static const arm_feature_set fpu_vfp_ext_v1xd
=
290 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD
);
291 static const arm_feature_set fpu_vfp_ext_v1
=
292 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1
);
293 static const arm_feature_set fpu_vfp_ext_v2
=
294 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2
);
295 static const arm_feature_set fpu_vfp_ext_v3xd
=
296 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD
);
297 static const arm_feature_set fpu_vfp_ext_v3
=
298 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3
);
299 static const arm_feature_set fpu_vfp_ext_d32
=
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32
);
301 static const arm_feature_set fpu_neon_ext_v1
=
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
);
303 static const arm_feature_set fpu_vfp_v3_or_neon_ext
=
304 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_VFP_EXT_V3
);
305 static const arm_feature_set mve_ext
=
306 ARM_FEATURE_COPROC (FPU_MVE
);
307 static const arm_feature_set mve_fp_ext
=
308 ARM_FEATURE_COPROC (FPU_MVE_FP
);
310 static const arm_feature_set fpu_vfp_fp16
=
311 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16
);
312 static const arm_feature_set fpu_neon_ext_fma
=
313 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA
);
315 static const arm_feature_set fpu_vfp_ext_fma
=
316 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA
);
317 static const arm_feature_set fpu_vfp_ext_armv8
=
318 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8
);
319 static const arm_feature_set fpu_vfp_ext_armv8xd
=
320 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD
);
321 static const arm_feature_set fpu_neon_ext_armv8
=
322 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8
);
323 static const arm_feature_set fpu_crypto_ext_armv8
=
324 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8
);
325 static const arm_feature_set crc_ext_armv8
=
326 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
);
327 static const arm_feature_set fpu_neon_ext_v8_1
=
328 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA
);
329 static const arm_feature_set fpu_neon_ext_dotprod
=
330 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
);
332 static int mfloat_abi_opt
= -1;
333 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
335 static arm_feature_set selected_arch
= ARM_ARCH_NONE
;
336 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
338 static arm_feature_set selected_ext
= ARM_ARCH_NONE
;
339 /* Feature bits selected by the last -mcpu/-march or by the combination of the
340 last .cpu/.arch directive .arch_extension directives since that
342 static arm_feature_set selected_cpu
= ARM_ARCH_NONE
;
343 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
344 static arm_feature_set selected_fpu
= FPU_NONE
;
345 /* Feature bits selected by the last .object_arch directive. */
346 static arm_feature_set selected_object_arch
= ARM_ARCH_NONE
;
347 /* Must be long enough to hold any of the names in arm_cpus. */
348 static char selected_cpu_name
[20];
350 extern FLONUM_TYPE generic_floating_point_number
;
352 /* Return if no cpu was selected on command-line. */
354 no_cpu_selected (void)
356 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
361 static int meabi_flags
= EABI_DEFAULT
;
363 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
366 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
371 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
376 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
377 symbolS
* GOT_symbol
;
380 /* 0: assemble for ARM,
381 1: assemble for Thumb,
382 2: assemble for Thumb even though target CPU does not support thumb
384 static int thumb_mode
= 0;
385 /* A value distinct from the possible values for thumb_mode that we
386 can use to record whether thumb_mode has been copied into the
387 tc_frag_data field of a frag. */
388 #define MODE_RECORDED (1 << 4)
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
393 IMPLICIT_IT_MODE_NEVER
= 0x00,
394 IMPLICIT_IT_MODE_ARM
= 0x01,
395 IMPLICIT_IT_MODE_THUMB
= 0x02,
396 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
398 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
412 Important differences from the old Thumb mode:
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
423 static bfd_boolean unified_syntax
= FALSE
;
425 /* An immediate operand can start with #, and ld*, st*, pld operands
426 can contain [ and ]. We need to tell APP not to elide whitespace
427 before a [, which can appear as the first operand for pld.
428 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
429 const char arm_symbol_chars
[] = "#[]{}";
444 enum neon_el_type type
;
448 #define NEON_MAX_TYPE_ELS 4
452 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
456 enum pred_instruction_type
462 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
463 if inside, should be the last one. */
464 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
465 i.e. BKPT and NOP. */
466 IT_INSN
, /* The IT insn has been parsed. */
467 VPT_INSN
, /* The VPT/VPST insn has been parsed. */
468 MVE_OUTSIDE_PRED_INSN
, /* Instruction to indicate a MVE instruction without
469 a predication code. */
470 MVE_UNPREDICABLE_INSN
/* MVE instruction that is non-predicable. */
473 /* The maximum number of operands we need. */
474 #define ARM_IT_MAX_OPERANDS 6
475 #define ARM_IT_MAX_RELOCS 3
480 unsigned long instruction
;
484 /* "uncond_value" is set to the value in place of the conditional field in
485 unconditional versions of the instruction, or -1 if nothing is
488 struct neon_type vectype
;
489 /* This does not indicate an actual NEON instruction, only that
490 the mnemonic accepts neon-style type suffixes. */
492 /* Set to the opcode if the instruction needs relaxation.
493 Zero if the instruction is not relaxed. */
497 bfd_reloc_code_real_type type
;
500 } relocs
[ARM_IT_MAX_RELOCS
];
502 enum pred_instruction_type pred_insn_type
;
508 struct neon_type_el vectype
;
509 unsigned present
: 1; /* Operand present. */
510 unsigned isreg
: 1; /* Operand was a register. */
511 unsigned immisreg
: 2; /* .imm field is a second register.
512 0: imm, 1: gpr, 2: MVE Q-register. */
513 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
514 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
515 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
516 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
517 instructions. This allows us to disambiguate ARM <-> vector insns. */
518 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
519 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
520 unsigned isquad
: 1; /* Operand is SIMD quad register. */
521 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
522 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
523 unsigned writeback
: 1; /* Operand has trailing ! */
524 unsigned preind
: 1; /* Preindexed address. */
525 unsigned postind
: 1; /* Postindexed address. */
526 unsigned negative
: 1; /* Index register was negated. */
527 unsigned shifted
: 1; /* Shift applied to operation. */
528 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
529 } operands
[ARM_IT_MAX_OPERANDS
];
532 static struct arm_it inst
;
534 #define NUM_FLOAT_VALS 8
536 const char * fp_const
[] =
538 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
541 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
551 #define CP_T_X 0x00008000
552 #define CP_T_Y 0x00400000
554 #define CONDS_BIT 0x00100000
555 #define LOAD_BIT 0x00100000
557 #define DOUBLE_LOAD_FLAG 0x00000001
561 const char * template_name
;
565 #define COND_ALWAYS 0xE
569 const char * template_name
;
573 struct asm_barrier_opt
575 const char * template_name
;
577 const arm_feature_set arch
;
580 /* The bit that distinguishes CPSR and SPSR. */
581 #define SPSR_BIT (1 << 22)
583 /* The individual PSR flag bits. */
584 #define PSR_c (1 << 16)
585 #define PSR_x (1 << 17)
586 #define PSR_s (1 << 18)
587 #define PSR_f (1 << 19)
592 bfd_reloc_code_real_type reloc
;
597 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
598 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
603 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
606 /* Bits for DEFINED field in neon_typed_alias. */
607 #define NTA_HASTYPE 1
608 #define NTA_HASINDEX 2
610 struct neon_typed_alias
612 unsigned char defined
;
614 struct neon_type_el eltype
;
617 /* ARM register categories. This includes coprocessor numbers and various
618 architecture extensions' registers. Each entry should have an error message
619 in reg_expected_msgs below. */
648 /* Structure for a hash table entry for a register.
649 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
650 information which states whether a vector type or index is specified (for a
651 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
657 unsigned char builtin
;
658 struct neon_typed_alias
* neon
;
661 /* Diagnostics used when we don't get a register of the expected type. */
662 const char * const reg_expected_msgs
[] =
664 [REG_TYPE_RN
] = N_("ARM register expected"),
665 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
666 [REG_TYPE_CN
] = N_("co-processor register expected"),
667 [REG_TYPE_FN
] = N_("FPA register expected"),
668 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
669 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
670 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
671 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
672 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
673 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
674 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
676 [REG_TYPE_VFC
] = N_("VFP system register expected"),
677 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
678 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
679 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
680 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
681 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
682 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
683 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
684 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
685 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
686 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
687 [REG_TYPE_MQ
] = N_("MVE vector register expected"),
688 [REG_TYPE_RNB
] = N_("")
691 /* Some well known registers that we refer to directly elsewhere. */
697 /* ARM instructions take 4bytes in the object file, Thumb instructions
703 /* Basic string to match. */
704 const char * template_name
;
706 /* Parameters to instruction. */
707 unsigned int operands
[8];
709 /* Conditional tag - see opcode_lookup. */
710 unsigned int tag
: 4;
712 /* Basic instruction code. */
715 /* Thumb-format instruction code. */
718 /* Which architecture variant provides this instruction. */
719 const arm_feature_set
* avariant
;
720 const arm_feature_set
* tvariant
;
722 /* Function to call to encode instruction in ARM format. */
723 void (* aencode
) (void);
725 /* Function to call to encode instruction in Thumb format. */
726 void (* tencode
) (void);
728 /* Indicates whether this instruction may be vector predicated. */
729 unsigned int mayBeVecPred
: 1;
732 /* Defines for various bits that we will want to toggle. */
733 #define INST_IMMEDIATE 0x02000000
734 #define OFFSET_REG 0x02000000
735 #define HWOFFSET_IMM 0x00400000
736 #define SHIFT_BY_REG 0x00000010
737 #define PRE_INDEX 0x01000000
738 #define INDEX_UP 0x00800000
739 #define WRITE_BACK 0x00200000
740 #define LDM_TYPE_2_OR_3 0x00400000
741 #define CPSI_MMOD 0x00020000
743 #define LITERAL_MASK 0xf000f000
744 #define OPCODE_MASK 0xfe1fffff
745 #define V4_STR_BIT 0x00000020
746 #define VLDR_VMOV_SAME 0x0040f000
748 #define T2_SUBS_PC_LR 0xf3de8f00
750 #define DATA_OP_SHIFT 21
751 #define SBIT_SHIFT 20
753 #define T2_OPCODE_MASK 0xfe1fffff
754 #define T2_DATA_OP_SHIFT 21
755 #define T2_SBIT_SHIFT 20
757 #define A_COND_MASK 0xf0000000
758 #define A_PUSH_POP_OP_MASK 0x0fff0000
760 /* Opcodes for pushing/popping registers to/from the stack. */
761 #define A1_OPCODE_PUSH 0x092d0000
762 #define A2_OPCODE_PUSH 0x052d0004
763 #define A2_OPCODE_POP 0x049d0004
765 /* Codes to distinguish the arithmetic instructions. */
776 #define OPCODE_CMP 10
777 #define OPCODE_CMN 11
778 #define OPCODE_ORR 12
779 #define OPCODE_MOV 13
780 #define OPCODE_BIC 14
781 #define OPCODE_MVN 15
783 #define T2_OPCODE_AND 0
784 #define T2_OPCODE_BIC 1
785 #define T2_OPCODE_ORR 2
786 #define T2_OPCODE_ORN 3
787 #define T2_OPCODE_EOR 4
788 #define T2_OPCODE_ADD 8
789 #define T2_OPCODE_ADC 10
790 #define T2_OPCODE_SBC 11
791 #define T2_OPCODE_SUB 13
792 #define T2_OPCODE_RSB 14
794 #define T_OPCODE_MUL 0x4340
795 #define T_OPCODE_TST 0x4200
796 #define T_OPCODE_CMN 0x42c0
797 #define T_OPCODE_NEG 0x4240
798 #define T_OPCODE_MVN 0x43c0
800 #define T_OPCODE_ADD_R3 0x1800
801 #define T_OPCODE_SUB_R3 0x1a00
802 #define T_OPCODE_ADD_HI 0x4400
803 #define T_OPCODE_ADD_ST 0xb000
804 #define T_OPCODE_SUB_ST 0xb080
805 #define T_OPCODE_ADD_SP 0xa800
806 #define T_OPCODE_ADD_PC 0xa000
807 #define T_OPCODE_ADD_I8 0x3000
808 #define T_OPCODE_SUB_I8 0x3800
809 #define T_OPCODE_ADD_I3 0x1c00
810 #define T_OPCODE_SUB_I3 0x1e00
812 #define T_OPCODE_ASR_R 0x4100
813 #define T_OPCODE_LSL_R 0x4080
814 #define T_OPCODE_LSR_R 0x40c0
815 #define T_OPCODE_ROR_R 0x41c0
816 #define T_OPCODE_ASR_I 0x1000
817 #define T_OPCODE_LSL_I 0x0000
818 #define T_OPCODE_LSR_I 0x0800
820 #define T_OPCODE_MOV_I8 0x2000
821 #define T_OPCODE_CMP_I8 0x2800
822 #define T_OPCODE_CMP_LR 0x4280
823 #define T_OPCODE_MOV_HR 0x4600
824 #define T_OPCODE_CMP_HR 0x4500
826 #define T_OPCODE_LDR_PC 0x4800
827 #define T_OPCODE_LDR_SP 0x9800
828 #define T_OPCODE_STR_SP 0x9000
829 #define T_OPCODE_LDR_IW 0x6800
830 #define T_OPCODE_STR_IW 0x6000
831 #define T_OPCODE_LDR_IH 0x8800
832 #define T_OPCODE_STR_IH 0x8000
833 #define T_OPCODE_LDR_IB 0x7800
834 #define T_OPCODE_STR_IB 0x7000
835 #define T_OPCODE_LDR_RW 0x5800
836 #define T_OPCODE_STR_RW 0x5000
837 #define T_OPCODE_LDR_RH 0x5a00
838 #define T_OPCODE_STR_RH 0x5200
839 #define T_OPCODE_LDR_RB 0x5c00
840 #define T_OPCODE_STR_RB 0x5400
842 #define T_OPCODE_PUSH 0xb400
843 #define T_OPCODE_POP 0xbc00
845 #define T_OPCODE_BRANCH 0xe000
847 #define THUMB_SIZE 2 /* Size of thumb instruction. */
848 #define THUMB_PP_PC_LR 0x0100
849 #define THUMB_LOAD_BIT 0x0800
850 #define THUMB2_LOAD_BIT 0x00100000
852 #define BAD_SYNTAX _("syntax error")
853 #define BAD_ARGS _("bad arguments to instruction")
854 #define BAD_SP _("r13 not allowed here")
855 #define BAD_PC _("r15 not allowed here")
856 #define BAD_ODD _("Odd register not allowed here")
857 #define BAD_EVEN _("Even register not allowed here")
858 #define BAD_COND _("instruction cannot be conditional")
859 #define BAD_OVERLAP _("registers may not be the same")
860 #define BAD_HIREG _("lo register required")
861 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
862 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
863 #define BAD_BRANCH _("branch must be last instruction in IT block")
864 #define BAD_BRANCH_OFF _("branch out of range or not a multiple of 2")
865 #define BAD_NOT_IT _("instruction not allowed in IT block")
866 #define BAD_NOT_VPT _("instruction missing MVE vector predication code")
867 #define BAD_FPU _("selected FPU does not support instruction")
868 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
869 #define BAD_OUT_VPT \
870 _("vector predicated instruction should be in VPT/VPST block")
871 #define BAD_IT_COND _("incorrect condition in IT block")
872 #define BAD_VPT_COND _("incorrect condition in VPT/VPST block")
873 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
874 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
875 #define BAD_PC_ADDRESSING \
876 _("cannot use register index with PC-relative addressing")
877 #define BAD_PC_WRITEBACK \
878 _("cannot use writeback with PC-relative addressing")
879 #define BAD_RANGE _("branch out of range")
880 #define BAD_FP16 _("selected processor does not support fp16 instruction")
881 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
882 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
883 #define MVE_NOT_IT _("Warning: instruction is UNPREDICTABLE in an IT " \
885 #define MVE_NOT_VPT _("Warning: instruction is UNPREDICTABLE in a VPT " \
887 #define MVE_BAD_PC _("Warning: instruction is UNPREDICTABLE with PC" \
889 #define MVE_BAD_SP _("Warning: instruction is UNPREDICTABLE with SP" \
891 #define BAD_SIMD_TYPE _("bad type in SIMD instruction")
892 #define BAD_MVE_AUTO \
893 _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
894 " use a valid -march or -mcpu option.")
895 #define BAD_MVE_SRCDEST _("Warning: 32-bit element size and same destination "\
896 "and source operands makes instruction UNPREDICTABLE")
897 #define BAD_EL_TYPE _("bad element type for instruction")
899 static struct hash_control
* arm_ops_hsh
;
900 static struct hash_control
* arm_cond_hsh
;
901 static struct hash_control
* arm_vcond_hsh
;
902 static struct hash_control
* arm_shift_hsh
;
903 static struct hash_control
* arm_psr_hsh
;
904 static struct hash_control
* arm_v7m_psr_hsh
;
905 static struct hash_control
* arm_reg_hsh
;
906 static struct hash_control
* arm_reloc_hsh
;
907 static struct hash_control
* arm_barrier_opt_hsh
;
909 /* Stuff needed to resolve the label ambiguity
918 symbolS
* last_label_seen
;
919 static int label_is_thumb_function_name
= FALSE
;
921 /* Literal pool structure. Held on a per-section
922 and per-sub-section basis. */
924 #define MAX_LITERAL_POOL_SIZE 1024
925 typedef struct literal_pool
927 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
928 unsigned int next_free_entry
;
934 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
936 struct literal_pool
* next
;
937 unsigned int alignment
;
940 /* Pointer to a linked list of literal pools. */
941 literal_pool
* list_of_pools
= NULL
;
943 typedef enum asmfunc_states
946 WAITING_ASMFUNC_NAME
,
950 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
953 # define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
955 static struct current_pred now_pred
;
959 now_pred_compatible (int cond
)
961 return (cond
& ~1) == (now_pred
.cc
& ~1);
965 conditional_insn (void)
967 return inst
.cond
!= COND_ALWAYS
;
970 static int in_pred_block (void);
972 static int handle_pred_state (void);
974 static void force_automatic_it_block_close (void);
976 static void it_fsm_post_encode (void);
978 #define set_pred_insn_type(type) \
981 inst.pred_insn_type = type; \
982 if (handle_pred_state () == FAIL) \
987 #define set_pred_insn_type_nonvoid(type, failret) \
990 inst.pred_insn_type = type; \
991 if (handle_pred_state () == FAIL) \
996 #define set_pred_insn_type_last() \
999 if (inst.cond == COND_ALWAYS) \
1000 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN); \
1002 set_pred_insn_type (INSIDE_IT_LAST_INSN); \
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or    0d1.2345e12  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   constant.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
1042 skip_past_char (char ** str
, char c
)
1044 /* PR gas/14987: Allow for whitespace before the expected character. */
1045 skip_whitespace (*str
);
1056 #define skip_past_comma(str) skip_past_char (str, ',')
1058 /* Arithmetic expressions (possibly involving symbols). */
1060 /* Return TRUE if anything in the expression is a bignum. */
1063 walk_no_bignums (symbolS
* sp
)
1065 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1068 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1070 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1071 || (symbol_get_value_expression (sp
)->X_op_symbol
1072 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
1078 static bfd_boolean in_my_get_expression
= FALSE
;
1080 /* Third argument to my_get_expression. */
1081 #define GE_NO_PREFIX 0
1082 #define GE_IMM_PREFIX 1
1083 #define GE_OPT_PREFIX 2
1084 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1085 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1086 #define GE_OPT_PREFIX_BIG 3
1089 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1093 /* In unified syntax, all prefixes are optional. */
1095 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1098 switch (prefix_mode
)
1100 case GE_NO_PREFIX
: break;
1102 if (!is_immediate_prefix (**str
))
1104 inst
.error
= _("immediate expression requires a # prefix");
1110 case GE_OPT_PREFIX_BIG
:
1111 if (is_immediate_prefix (**str
))
1118 memset (ep
, 0, sizeof (expressionS
));
1120 save_in
= input_line_pointer
;
1121 input_line_pointer
= *str
;
1122 in_my_get_expression
= TRUE
;
1124 in_my_get_expression
= FALSE
;
1126 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1128 /* We found a bad or missing expression in md_operand(). */
1129 *str
= input_line_pointer
;
1130 input_line_pointer
= save_in
;
1131 if (inst
.error
== NULL
)
1132 inst
.error
= (ep
->X_op
== O_absent
1133 ? _("missing expression") :_("bad expression"));
1137 /* Get rid of any bignums now, so that we don't generate an error for which
1138 we can't establish a line number later on. Big numbers are never valid
1139 in instructions, which is where this routine is always called. */
1140 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1141 && (ep
->X_op
== O_big
1142 || (ep
->X_add_symbol
1143 && (walk_no_bignums (ep
->X_add_symbol
)
1145 && walk_no_bignums (ep
->X_op_symbol
))))))
1147 inst
.error
= _("invalid constant");
1148 *str
= input_line_pointer
;
1149 input_line_pointer
= save_in
;
1153 *str
= input_line_pointer
;
1154 input_line_pointer
= save_in
;
1158 /* Turn a string in input_line_pointer into a floating point constant
1159 of type TYPE, and store the appropriate bytes in *LITP. The number
1160 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1161 returned, or NULL on OK.
1163 Note that fp constants aren't represent in the normal way on the ARM.
1164 In big endian mode, things are as expected. However, in little endian
1165 mode fp constants are big-endian word-wise, and little-endian byte-wise
1166 within the words. For example, (double) 1.1 in big endian mode is
1167 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1168 the byte sequence 99 99 f1 3f 9a 99 99 99.
1170 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1173 md_atof (int type
, char * litP
, int * sizeP
)
1176 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1208 return _("Unrecognized or unsupported floating point constant");
1211 t
= atof_ieee (input_line_pointer
, type
, words
);
1213 input_line_pointer
= t
;
1214 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1216 if (target_big_endian
)
1218 for (i
= 0; i
< prec
; i
++)
1220 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1221 litP
+= sizeof (LITTLENUM_TYPE
);
1226 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1227 for (i
= prec
- 1; i
>= 0; i
--)
1229 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1230 litP
+= sizeof (LITTLENUM_TYPE
);
1233 /* For a 4 byte float the order of elements in `words' is 1 0.
1234 For an 8 byte float the order is 1 0 3 2. */
1235 for (i
= 0; i
< prec
; i
+= 2)
1237 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1238 sizeof (LITTLENUM_TYPE
));
1239 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1240 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1241 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1248 /* We handle all bad expressions here, so that we can report the faulty
1249 instruction in the error message. */
1252 md_operand (expressionS
* exp
)
1254 if (in_my_get_expression
)
1255 exp
->X_op
= O_illegal
;
1258 /* Immediate values. */
1261 /* Generic immediate-value read function for use in directives.
1262 Accepts anything that 'expression' can fold to a constant.
1263 *val receives the number. */
1266 immediate_for_directive (int *val
)
1269 exp
.X_op
= O_illegal
;
1271 if (is_immediate_prefix (*input_line_pointer
))
1273 input_line_pointer
++;
1277 if (exp
.X_op
!= O_constant
)
1279 as_bad (_("expected #constant"));
1280 ignore_rest_of_line ();
1283 *val
= exp
.X_add_number
;
1288 /* Register parsing. */
1290 /* Generic register parser. CCP points to what should be the
1291 beginning of a register name. If it is indeed a valid register
1292 name, advance CCP over it and return the reg_entry structure;
1293 otherwise return NULL. Does not issue diagnostics. */
1295 static struct reg_entry
*
1296 arm_reg_parse_multi (char **ccp
)
1300 struct reg_entry
*reg
;
1302 skip_whitespace (start
);
1304 #ifdef REGISTER_PREFIX
1305 if (*start
!= REGISTER_PREFIX
)
1309 #ifdef OPTIONAL_REGISTER_PREFIX
1310 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1315 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1320 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1322 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1332 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1333 enum arm_reg_type type
)
1335 /* Alternative syntaxes are accepted for a few register classes. */
1342 /* Generic coprocessor register names are allowed for these. */
1343 if (reg
&& reg
->type
== REG_TYPE_CN
)
1348 /* For backward compatibility, a bare number is valid here. */
1350 unsigned long processor
= strtoul (start
, ccp
, 10);
1351 if (*ccp
!= start
&& processor
<= 15)
1356 case REG_TYPE_MMXWC
:
1357 /* WC includes WCG. ??? I'm not sure this is true for all
1358 instructions that take WC registers. */
1359 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1370 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1371 return value is the register number or FAIL. */
1374 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1377 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1380 /* Do not allow a scalar (reg+index) to parse as a register. */
1381 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1384 if (reg
&& reg
->type
== type
)
1387 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1394 /* Parse a Neon type specifier. *STR should point at the leading '.'
1395 character. Does no verification at this stage that the type fits the opcode
1402 Can all be legally parsed by this function.
1404 Fills in neon_type struct pointer with parsed information, and updates STR
1405 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1406 type, FAIL if not. */
1409 parse_neon_type (struct neon_type
*type
, char **str
)
1416 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1418 enum neon_el_type thistype
= NT_untyped
;
1419 unsigned thissize
= -1u;
1426 /* Just a size without an explicit type. */
1430 switch (TOLOWER (*ptr
))
1432 case 'i': thistype
= NT_integer
; break;
1433 case 'f': thistype
= NT_float
; break;
1434 case 'p': thistype
= NT_poly
; break;
1435 case 's': thistype
= NT_signed
; break;
1436 case 'u': thistype
= NT_unsigned
; break;
1438 thistype
= NT_float
;
1443 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1449 /* .f is an abbreviation for .f32. */
1450 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1455 thissize
= strtoul (ptr
, &ptr
, 10);
1457 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1460 as_bad (_("bad size %d in type specifier"), thissize
);
1468 type
->el
[type
->elems
].type
= thistype
;
1469 type
->el
[type
->elems
].size
= thissize
;
1474 /* Empty/missing type is not a successful parse. */
1475 if (type
->elems
== 0)
1483 /* Errors may be set multiple times during parsing or bit encoding
1484 (particularly in the Neon bits), but usually the earliest error which is set
1485 will be the most meaningful. Avoid overwriting it with later (cascading)
1486 errors by calling this function. */
1489 first_error (const char *err
)
1495 /* Parse a single type, e.g. ".s32", leading period included. */
1497 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1500 struct neon_type optype
;
1504 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1506 if (optype
.elems
== 1)
1507 *vectype
= optype
.el
[0];
1510 first_error (_("only one type should be specified for operand"));
1516 first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
1534 /* Record a use of the given feature. */
1536 record_feature_use (const arm_feature_set
*feature
)
1539 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
1541 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
1544 /* If the given feature available in the selected CPU, mark it as used.
1545 Returns TRUE iff feature is available. */
1547 mark_feature_used (const arm_feature_set
*feature
)
1550 /* Do not support the use of MVE only instructions when in auto-detection or
1552 if (((feature
== &mve_ext
) || (feature
== &mve_fp_ext
))
1553 && ARM_CPU_IS_ANY (cpu_variant
))
1555 first_error (BAD_MVE_AUTO
);
1558 /* Ensure the option is valid on the current architecture. */
1559 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
1562 /* Add the appropriate architecture feature for the barrier option used.
1564 record_feature_use (feature
);
1569 /* Parse either a register or a scalar, with an optional type. Return the
1570 register number, and optionally fill in the actual type of the register
1571 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1572 type/index information in *TYPEINFO. */
1575 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1576 enum arm_reg_type
*rtype
,
1577 struct neon_typed_alias
*typeinfo
)
1580 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1581 struct neon_typed_alias atype
;
1582 struct neon_type_el parsetype
;
1586 atype
.eltype
.type
= NT_invtype
;
1587 atype
.eltype
.size
= -1;
1589 /* Try alternate syntax for some types of register. Note these are mutually
1590 exclusive with the Neon syntax extensions. */
1593 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1601 /* Undo polymorphism when a set of register types may be accepted. */
1602 if ((type
== REG_TYPE_NDQ
1603 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1604 || (type
== REG_TYPE_VFSD
1605 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1606 || (type
== REG_TYPE_NSDQ
1607 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1608 || reg
->type
== REG_TYPE_NQ
))
1609 || (type
== REG_TYPE_NSD
1610 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1611 || (type
== REG_TYPE_MMXWC
1612 && (reg
->type
== REG_TYPE_MMXWCG
)))
1613 type
= (enum arm_reg_type
) reg
->type
;
1615 if (type
== REG_TYPE_MQ
)
1617 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
1620 if (!reg
|| reg
->type
!= REG_TYPE_NQ
)
1623 if (reg
->number
> 14 && !mark_feature_used (&fpu_vfp_ext_d32
))
1625 first_error (_("expected MVE register [q0..q7]"));
1630 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
1631 && (type
== REG_TYPE_NQ
))
1635 if (type
!= reg
->type
)
1641 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1643 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1645 first_error (_("can't redefine type for operand"));
1648 atype
.defined
|= NTA_HASTYPE
;
1649 atype
.eltype
= parsetype
;
1652 if (skip_past_char (&str
, '[') == SUCCESS
)
1654 if (type
!= REG_TYPE_VFD
1655 && !(type
== REG_TYPE_VFS
1656 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1658 first_error (_("only D registers may be indexed"));
1662 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1664 first_error (_("can't change index for operand"));
1668 atype
.defined
|= NTA_HASINDEX
;
1670 if (skip_past_char (&str
, ']') == SUCCESS
)
1671 atype
.index
= NEON_ALL_LANES
;
1676 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1678 if (exp
.X_op
!= O_constant
)
1680 first_error (_("constant expression required"));
1684 if (skip_past_char (&str
, ']') == FAIL
)
1687 atype
.index
= exp
.X_add_number
;
1702 /* Like arm_reg_parse, but also allow the following extra features:
1703 - If RTYPE is non-zero, return the (possibly restricted) type of the
1704 register (e.g. Neon double or quad reg when either has been requested).
1705 - If this is a Neon vector type with additional type information, fill
1706 in the struct pointed to by VECTYPE (if non-NULL).
1707 This function will fault on encountering a scalar. */
1710 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1711 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1713 struct neon_typed_alias atype
;
1715 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1720 /* Do not allow regname(... to parse as a register. */
1724 /* Do not allow a scalar (reg+index) to parse as a register. */
1725 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1727 first_error (_("register operand expected, but got scalar"));
1732 *vectype
= atype
.eltype
;
1739 #define NEON_SCALAR_REG(X) ((X) >> 4)
1740 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1742 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1743 have enough information to be able to do a good job bounds-checking. So, we
1744 just do easy checks here, and do further checks later. */
1747 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1751 struct neon_typed_alias atype
;
1752 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1755 reg_type
= REG_TYPE_VFS
;
1757 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1759 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1762 if (atype
.index
== NEON_ALL_LANES
)
1764 first_error (_("scalar must have an index"));
1767 else if (atype
.index
>= 64 / elsize
)
1769 first_error (_("scalar index out of range"));
1774 *type
= atype
.eltype
;
1778 return reg
* 16 + atype
.index
;
1781 /* Types of registers in a list. */
1794 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1797 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1803 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1805 /* We come back here if we get ranges concatenated by '+' or '|'. */
1808 skip_whitespace (str
);
1821 const char apsr_str
[] = "apsr";
1822 int apsr_str_len
= strlen (apsr_str
);
1824 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1825 if (etype
== REGLIST_CLRM
)
1827 if (reg
== REG_SP
|| reg
== REG_PC
)
1829 else if (reg
== FAIL
1830 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1831 && !ISALPHA (*(str
+ apsr_str_len
)))
1834 str
+= apsr_str_len
;
1839 first_error (_("r0-r12, lr or APSR expected"));
1843 else /* etype == REGLIST_RN. */
1847 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1858 first_error (_("bad range in register list"));
1862 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1864 if (range
& (1 << i
))
1866 (_("Warning: duplicated register (r%d) in register list"),
1874 if (range
& (1 << reg
))
1875 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1877 else if (reg
<= cur_reg
)
1878 as_tsktsk (_("Warning: register range not in ascending order"));
1883 while (skip_past_comma (&str
) != FAIL
1884 || (in_range
= 1, *str
++ == '-'));
1887 if (skip_past_char (&str
, '}') == FAIL
)
1889 first_error (_("missing `}'"));
1893 else if (etype
== REGLIST_RN
)
1897 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1900 if (exp
.X_op
== O_constant
)
1902 if (exp
.X_add_number
1903 != (exp
.X_add_number
& 0x0000ffff))
1905 inst
.error
= _("invalid register mask");
1909 if ((range
& exp
.X_add_number
) != 0)
1911 int regno
= range
& exp
.X_add_number
;
1914 regno
= (1 << regno
) - 1;
1916 (_("Warning: duplicated register (r%d) in register list"),
1920 range
|= exp
.X_add_number
;
1924 if (inst
.relocs
[0].type
!= 0)
1926 inst
.error
= _("expression too complex");
1930 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1931 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1932 inst
.relocs
[0].pc_rel
= 0;
1936 if (*str
== '|' || *str
== '+')
1942 while (another_range
);
1948 /* Parse a VFP register list. If the string is invalid return FAIL.
1949 Otherwise return the number of registers, and set PBASE to the first
1950 register. Parses registers of type ETYPE.
1951 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1952 - Q registers can be used to specify pairs of D registers
1953 - { } can be omitted from around a singleton register list
1954 FIXME: This is not implemented, as it would require backtracking in
1957 This could be done (the meaning isn't really ambiguous), but doesn't
1958 fit in well with the current parsing framework.
1959 - 32 D registers may be used (also true for VFPv3).
1960 FIXME: Types are ignored in these register lists, which is probably a
1964 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1965 bfd_boolean
*partial_match
)
1970 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1974 unsigned long mask
= 0;
1976 bfd_boolean vpr_seen
= FALSE
;
1977 bfd_boolean expect_vpr
=
1978 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1980 if (skip_past_char (&str
, '{') == FAIL
)
1982 inst
.error
= _("expecting {");
1989 case REGLIST_VFP_S_VPR
:
1990 regtype
= REG_TYPE_VFS
;
1995 case REGLIST_VFP_D_VPR
:
1996 regtype
= REG_TYPE_VFD
;
1999 case REGLIST_NEON_D
:
2000 regtype
= REG_TYPE_NDQ
;
2007 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
2009 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
2010 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
2014 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
2017 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
2024 base_reg
= max_regs
;
2025 *partial_match
= FALSE
;
2029 int setmask
= 1, addregs
= 1;
2030 const char vpr_str
[] = "vpr";
2031 int vpr_str_len
= strlen (vpr_str
);
2033 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
2037 if (new_base
== FAIL
2038 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
2039 && !ISALPHA (*(str
+ vpr_str_len
))
2045 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
2049 first_error (_("VPR expected last"));
2052 else if (new_base
== FAIL
)
2054 if (regtype
== REG_TYPE_VFS
)
2055 first_error (_("VFP single precision register or VPR "
2057 else /* regtype == REG_TYPE_VFD. */
2058 first_error (_("VFP/Neon double precision register or VPR "
2063 else if (new_base
== FAIL
)
2065 first_error (_(reg_expected_msgs
[regtype
]));
2069 *partial_match
= TRUE
;
2073 if (new_base
>= max_regs
)
2075 first_error (_("register out of range in list"));
2079 /* Note: a value of 2 * n is returned for the register Q<n>. */
2080 if (regtype
== REG_TYPE_NQ
)
2086 if (new_base
< base_reg
)
2087 base_reg
= new_base
;
2089 if (mask
& (setmask
<< new_base
))
2091 first_error (_("invalid register list"));
2095 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2097 as_tsktsk (_("register list not in ascending order"));
2101 mask
|= setmask
<< new_base
;
2104 if (*str
== '-') /* We have the start of a range expression */
2110 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2113 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2117 if (high_range
>= max_regs
)
2119 first_error (_("register out of range in list"));
2123 if (regtype
== REG_TYPE_NQ
)
2124 high_range
= high_range
+ 1;
2126 if (high_range
<= new_base
)
2128 inst
.error
= _("register range not in ascending order");
2132 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2134 if (mask
& (setmask
<< new_base
))
2136 inst
.error
= _("invalid register list");
2140 mask
|= setmask
<< new_base
;
2145 while (skip_past_comma (&str
) != FAIL
);
2149 /* Sanity check -- should have raised a parse error above. */
2150 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2155 if (expect_vpr
&& !vpr_seen
)
2157 first_error (_("VPR expected last"));
2161 /* Final test -- the registers must be consecutive. */
2163 for (i
= 0; i
< count
; i
++)
2165 if ((mask
& (1u << i
)) == 0)
2167 inst
.error
= _("non-contiguous register range");
2177 /* True if two alias types are the same. */
2180 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2188 if (a
->defined
!= b
->defined
)
2191 if ((a
->defined
& NTA_HASTYPE
) != 0
2192 && (a
->eltype
.type
!= b
->eltype
.type
2193 || a
->eltype
.size
!= b
->eltype
.size
))
2196 if ((a
->defined
& NTA_HASINDEX
) != 0
2197 && (a
->index
!= b
->index
))
2203 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2204 The base register is put in *PBASE.
2205 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2207 The register stride (minus one) is put in bit 4 of the return value.
2208 Bits [6:5] encode the list length (minus one).
2209 The type of the list elements is put in *ELTYPE, if non-NULL. */
2211 #define NEON_LANE(X) ((X) & 0xf)
2212 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2213 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2216 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2218 struct neon_type_el
*eltype
)
2225 int leading_brace
= 0;
2226 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2227 const char *const incr_error
= mve
? _("register stride must be 1") :
2228 _("register stride must be 1 or 2");
2229 const char *const type_error
= _("mismatched element/structure types in list");
2230 struct neon_typed_alias firsttype
;
2231 firsttype
.defined
= 0;
2232 firsttype
.eltype
.type
= NT_invtype
;
2233 firsttype
.eltype
.size
= -1;
2234 firsttype
.index
= -1;
2236 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2241 struct neon_typed_alias atype
;
2243 rtype
= REG_TYPE_MQ
;
2244 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2248 first_error (_(reg_expected_msgs
[rtype
]));
2255 if (rtype
== REG_TYPE_NQ
)
2261 else if (reg_incr
== -1)
2263 reg_incr
= getreg
- base_reg
;
2264 if (reg_incr
< 1 || reg_incr
> 2)
2266 first_error (_(incr_error
));
2270 else if (getreg
!= base_reg
+ reg_incr
* count
)
2272 first_error (_(incr_error
));
2276 if (! neon_alias_types_same (&atype
, &firsttype
))
2278 first_error (_(type_error
));
2282 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2286 struct neon_typed_alias htype
;
2287 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2289 lane
= NEON_INTERLEAVE_LANES
;
2290 else if (lane
!= NEON_INTERLEAVE_LANES
)
2292 first_error (_(type_error
));
2297 else if (reg_incr
!= 1)
2299 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2303 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2306 first_error (_(reg_expected_msgs
[rtype
]));
2309 if (! neon_alias_types_same (&htype
, &firsttype
))
2311 first_error (_(type_error
));
2314 count
+= hireg
+ dregs
- getreg
;
2318 /* If we're using Q registers, we can't use [] or [n] syntax. */
2319 if (rtype
== REG_TYPE_NQ
)
2325 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2329 else if (lane
!= atype
.index
)
2331 first_error (_(type_error
));
2335 else if (lane
== -1)
2336 lane
= NEON_INTERLEAVE_LANES
;
2337 else if (lane
!= NEON_INTERLEAVE_LANES
)
2339 first_error (_(type_error
));
2344 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2346 /* No lane set by [x]. We must be interleaving structures. */
2348 lane
= NEON_INTERLEAVE_LANES
;
2351 if (lane
== -1 || base_reg
== -1 || count
< 1 || (!mve
&& count
> 4)
2352 || (count
> 1 && reg_incr
== -1))
2354 first_error (_("error parsing element/structure list"));
2358 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2360 first_error (_("expected }"));
2368 *eltype
= firsttype
.eltype
;
2373 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2376 /* Parse an explicit relocation suffix on an expression. This is
2377 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2378 arm_reloc_hsh contains no entries, so this function can only
2379 succeed if there is no () after the word. Returns -1 on error,
2380 BFD_RELOC_UNUSED if there wasn't any suffix. */
2383 parse_reloc (char **str
)
2385 struct reloc_entry
*r
;
2389 return BFD_RELOC_UNUSED
;
2394 while (*q
&& *q
!= ')' && *q
!= ',')
2399 if ((r
= (struct reloc_entry
*)
2400 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2407 /* Directives: register aliases. */
2409 static struct reg_entry
*
2410 insert_reg_alias (char *str
, unsigned number
, int type
)
2412 struct reg_entry
*new_reg
;
2415 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2417 if (new_reg
->builtin
)
2418 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2420 /* Only warn about a redefinition if it's not defined as the
2422 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2423 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2428 name
= xstrdup (str
);
2429 new_reg
= XNEW (struct reg_entry
);
2431 new_reg
->name
= name
;
2432 new_reg
->number
= number
;
2433 new_reg
->type
= type
;
2434 new_reg
->builtin
= FALSE
;
2435 new_reg
->neon
= NULL
;
2437 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2444 insert_neon_reg_alias (char *str
, int number
, int type
,
2445 struct neon_typed_alias
*atype
)
2447 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2451 first_error (_("attempt to redefine typed alias"));
2457 reg
->neon
= XNEW (struct neon_typed_alias
);
2458 *reg
->neon
= *atype
;
2462 /* Look for the .req directive. This is of the form:
2464 new_register_name .req existing_register_name
2466 If we find one, or if it looks sufficiently like one that we want to
2467 handle any error here, return TRUE. Otherwise return FALSE. */
2470 create_register_alias (char * newname
, char *p
)
2472 struct reg_entry
*old
;
2473 char *oldname
, *nbuf
;
2476 /* The input scrubber ensures that whitespace after the mnemonic is
2477 collapsed to single spaces. */
2479 if (strncmp (oldname
, " .req ", 6) != 0)
2483 if (*oldname
== '\0')
2486 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2489 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2493 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2494 the desired alias name, and p points to its end. If not, then
2495 the desired alias name is in the global original_case_string. */
2496 #ifdef TC_CASE_SENSITIVE
2499 newname
= original_case_string
;
2500 nlen
= strlen (newname
);
2503 nbuf
= xmemdup0 (newname
, nlen
);
2505 /* Create aliases under the new name as stated; an all-lowercase
2506 version of the new name; and an all-uppercase version of the new
2508 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2510 for (p
= nbuf
; *p
; p
++)
2513 if (strncmp (nbuf
, newname
, nlen
))
2515 /* If this attempt to create an additional alias fails, do not bother
2516 trying to create the all-lower case alias. We will fail and issue
2517 a second, duplicate error message. This situation arises when the
2518 programmer does something like:
2521 The second .req creates the "Foo" alias but then fails to create
2522 the artificial FOO alias because it has already been created by the
2524 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2531 for (p
= nbuf
; *p
; p
++)
2534 if (strncmp (nbuf
, newname
, nlen
))
2535 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2542 /* Create a Neon typed/indexed register alias using directives, e.g.:
2547 These typed registers can be used instead of the types specified after the
2548 Neon mnemonic, so long as all operands given have types. Types can also be
2549 specified directly, e.g.:
2550 vadd d0.s32, d1.s32, d2.s32 */
2553 create_neon_reg_alias (char *newname
, char *p
)
2555 enum arm_reg_type basetype
;
2556 struct reg_entry
*basereg
;
2557 struct reg_entry mybasereg
;
2558 struct neon_type ntype
;
2559 struct neon_typed_alias typeinfo
;
2560 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2563 typeinfo
.defined
= 0;
2564 typeinfo
.eltype
.type
= NT_invtype
;
2565 typeinfo
.eltype
.size
= -1;
2566 typeinfo
.index
= -1;
2570 if (strncmp (p
, " .dn ", 5) == 0)
2571 basetype
= REG_TYPE_VFD
;
2572 else if (strncmp (p
, " .qn ", 5) == 0)
2573 basetype
= REG_TYPE_NQ
;
2582 basereg
= arm_reg_parse_multi (&p
);
2584 if (basereg
&& basereg
->type
!= basetype
)
2586 as_bad (_("bad type for register"));
2590 if (basereg
== NULL
)
2593 /* Try parsing as an integer. */
2594 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2595 if (exp
.X_op
!= O_constant
)
2597 as_bad (_("expression must be constant"));
2600 basereg
= &mybasereg
;
2601 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2607 typeinfo
= *basereg
->neon
;
2609 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2611 /* We got a type. */
2612 if (typeinfo
.defined
& NTA_HASTYPE
)
2614 as_bad (_("can't redefine the type of a register alias"));
2618 typeinfo
.defined
|= NTA_HASTYPE
;
2619 if (ntype
.elems
!= 1)
2621 as_bad (_("you must specify a single type only"));
2624 typeinfo
.eltype
= ntype
.el
[0];
2627 if (skip_past_char (&p
, '[') == SUCCESS
)
2630 /* We got a scalar index. */
2632 if (typeinfo
.defined
& NTA_HASINDEX
)
2634 as_bad (_("can't redefine the index of a scalar alias"));
2638 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2640 if (exp
.X_op
!= O_constant
)
2642 as_bad (_("scalar index must be constant"));
2646 typeinfo
.defined
|= NTA_HASINDEX
;
2647 typeinfo
.index
= exp
.X_add_number
;
2649 if (skip_past_char (&p
, ']') == FAIL
)
2651 as_bad (_("expecting ]"));
2656 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2657 the desired alias name, and p points to its end. If not, then
2658 the desired alias name is in the global original_case_string. */
2659 #ifdef TC_CASE_SENSITIVE
2660 namelen
= nameend
- newname
;
2662 newname
= original_case_string
;
2663 namelen
= strlen (newname
);
2666 namebuf
= xmemdup0 (newname
, namelen
);
2668 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2669 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2671 /* Insert name in all uppercase. */
2672 for (p
= namebuf
; *p
; p
++)
2675 if (strncmp (namebuf
, newname
, namelen
))
2676 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2677 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2679 /* Insert name in all lowercase. */
2680 for (p
= namebuf
; *p
; p
++)
2683 if (strncmp (namebuf
, newname
, namelen
))
2684 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2685 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2691 /* Should never be called, as .req goes between the alias and the
2692 register name, not at the beginning of the line. */
2695 s_req (int a ATTRIBUTE_UNUSED
)
2697 as_bad (_("invalid syntax for .req directive"));
2701 s_dn (int a ATTRIBUTE_UNUSED
)
2703 as_bad (_("invalid syntax for .dn directive"));
2707 s_qn (int a ATTRIBUTE_UNUSED
)
2709 as_bad (_("invalid syntax for .qn directive"));
2712 /* The .unreq directive deletes an alias which was previously defined
2713 by .req. For example:
2719 s_unreq (int a ATTRIBUTE_UNUSED
)
2724 name
= input_line_pointer
;
2726 while (*input_line_pointer
!= 0
2727 && *input_line_pointer
!= ' '
2728 && *input_line_pointer
!= '\n')
2729 ++input_line_pointer
;
2731 saved_char
= *input_line_pointer
;
2732 *input_line_pointer
= 0;
2735 as_bad (_("invalid syntax for .unreq directive"));
2738 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2742 as_bad (_("unknown register alias '%s'"), name
);
2743 else if (reg
->builtin
)
2744 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2751 hash_delete (arm_reg_hsh
, name
, FALSE
);
2752 free ((char *) reg
->name
);
2757 /* Also locate the all upper case and all lower case versions.
2758 Do not complain if we cannot find one or the other as it
2759 was probably deleted above. */
2761 nbuf
= strdup (name
);
2762 for (p
= nbuf
; *p
; p
++)
2764 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2767 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2768 free ((char *) reg
->name
);
2774 for (p
= nbuf
; *p
; p
++)
2776 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2779 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2780 free ((char *) reg
->name
);
2790 *input_line_pointer
= saved_char
;
2791 demand_empty_rest_of_line ();
2794 /* Directives: Instruction set selection. */
2797 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2798 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2799 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2800 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2802 /* Create a new mapping symbol for the transition to STATE. */
2805 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2808 const char * symname
;
2815 type
= BSF_NO_FLAGS
;
2819 type
= BSF_NO_FLAGS
;
2823 type
= BSF_NO_FLAGS
;
2829 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2830 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2835 THUMB_SET_FUNC (symbolP
, 0);
2836 ARM_SET_THUMB (symbolP
, 0);
2837 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2841 THUMB_SET_FUNC (symbolP
, 1);
2842 ARM_SET_THUMB (symbolP
, 1);
2843 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2851 /* Save the mapping symbols for future reference. Also check that
2852 we do not place two mapping symbols at the same offset within a
2853 frag. We'll handle overlap between frags in
2854 check_mapping_symbols.
2856 If .fill or other data filling directive generates zero sized data,
2857 the mapping symbol for the following code will have the same value
2858 as the one generated for the data filling directive. In this case,
2859 we replace the old symbol with the new one at the same address. */
2862 if (frag
->tc_frag_data
.first_map
!= NULL
)
2864 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2865 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2867 frag
->tc_frag_data
.first_map
= symbolP
;
2869 if (frag
->tc_frag_data
.last_map
!= NULL
)
2871 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2872 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2873 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2875 frag
->tc_frag_data
.last_map
= symbolP
;
2878 /* We must sometimes convert a region marked as code to data during
2879 code alignment, if an odd number of bytes have to be padded. The
2880 code mapping symbol is pushed to an aligned address. */
2883 insert_data_mapping_symbol (enum mstate state
,
2884 valueT value
, fragS
*frag
, offsetT bytes
)
2886 /* If there was already a mapping symbol, remove it. */
2887 if (frag
->tc_frag_data
.last_map
!= NULL
2888 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2890 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2894 know (frag
->tc_frag_data
.first_map
== symp
);
2895 frag
->tc_frag_data
.first_map
= NULL
;
2897 frag
->tc_frag_data
.last_map
= NULL
;
2898 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2901 make_mapping_symbol (MAP_DATA
, value
, frag
);
2902 make_mapping_symbol (state
, value
+ bytes
, frag
);
2905 static void mapping_state_2 (enum mstate state
, int max_chars
);
2907 /* Set the mapping state to STATE. Only call this when about to
2908 emit some STATE bytes to the file. */
2910 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2912 mapping_state (enum mstate state
)
2914 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2916 if (mapstate
== state
)
2917 /* The mapping symbol has already been emitted.
2918 There is nothing else to do. */
2921 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2923 All ARM instructions require 4-byte alignment.
2924 (Almost) all Thumb instructions require 2-byte alignment.
2926 When emitting instructions into any section, mark the section
2929 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2930 but themselves require 2-byte alignment; this applies to some
2931 PC- relative forms. However, these cases will involve implicit
2932 literal pool generation or an explicit .align >=2, both of
2933 which will cause the section to me marked with sufficient
2934 alignment. Thus, we don't handle those cases here. */
2935 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2937 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2938 /* This case will be evaluated later. */
2941 mapping_state_2 (state
, 0);
2944 /* Same as mapping_state, but MAX_CHARS bytes have already been
2945 allocated. Put the mapping symbol that far back. */
2948 mapping_state_2 (enum mstate state
, int max_chars
)
2950 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2952 if (!SEG_NORMAL (now_seg
))
2955 if (mapstate
== state
)
2956 /* The mapping symbol has already been emitted.
2957 There is nothing else to do. */
2960 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2961 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2963 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2964 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2967 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2970 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2971 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2975 #define mapping_state(x) ((void)0)
2976 #define mapping_state_2(x, y) ((void)0)
2979 /* Find the real, Thumb encoded start of a Thumb function. */
2983 find_real_start (symbolS
* symbolP
)
2986 const char * name
= S_GET_NAME (symbolP
);
2987 symbolS
* new_target
;
2989 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2990 #define STUB_NAME ".real_start_of"
2995 /* The compiler may generate BL instructions to local labels because
2996 it needs to perform a branch to a far away location. These labels
2997 do not have a corresponding ".real_start_of" label. We check
2998 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2999 the ".real_start_of" convention for nonlocal branches. */
3000 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
3003 real_start
= concat (STUB_NAME
, name
, NULL
);
3004 new_target
= symbol_find (real_start
);
3007 if (new_target
== NULL
)
3009 as_warn (_("Failed to find real start of function: %s\n"), name
);
3010 new_target
= symbolP
;
3018 opcode_select (int width
)
3025 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
3026 as_bad (_("selected processor does not support THUMB opcodes"));
3029 /* No need to force the alignment, since we will have been
3030 coming from ARM mode, which is word-aligned. */
3031 record_alignment (now_seg
, 1);
3038 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
3039 as_bad (_("selected processor does not support ARM opcodes"));
3044 frag_align (2, 0, 0);
3046 record_alignment (now_seg
, 1);
3051 as_bad (_("invalid instruction size selected (%d)"), width
);
3056 s_arm (int ignore ATTRIBUTE_UNUSED
)
3059 demand_empty_rest_of_line ();
3063 s_thumb (int ignore ATTRIBUTE_UNUSED
)
3066 demand_empty_rest_of_line ();
3070 s_code (int unused ATTRIBUTE_UNUSED
)
3074 temp
= get_absolute_expression ();
3079 opcode_select (temp
);
3083 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
3088 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
3090 /* If we are not already in thumb mode go into it, EVEN if
3091 the target processor does not support thumb instructions.
3092 This is used by gcc/config/arm/lib1funcs.asm for example
3093 to compile interworking support functions even if the
3094 target processor should not support interworking. */
3098 record_alignment (now_seg
, 1);
3101 demand_empty_rest_of_line ();
3105 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3109 /* The following label is the name/address of the start of a Thumb function.
3110 We need to know this for the interworking support. */
3111 label_is_thumb_function_name
= TRUE
;
3114 /* Perform a .set directive, but also mark the alias as
3115 being a thumb function. */
3118 s_thumb_set (int equiv
)
3120 /* XXX the following is a duplicate of the code for s_set() in read.c
3121 We cannot just call that code as we need to get at the symbol that
3128 /* Especial apologies for the random logic:
3129 This just grew, and could be parsed much more simply!
3131 delim
= get_symbol_name (& name
);
3132 end_name
= input_line_pointer
;
3133 (void) restore_line_pointer (delim
);
3135 if (*input_line_pointer
!= ',')
3138 as_bad (_("expected comma after name \"%s\""), name
);
3140 ignore_rest_of_line ();
3144 input_line_pointer
++;
3147 if (name
[0] == '.' && name
[1] == '\0')
3149 /* XXX - this should not happen to .thumb_set. */
3153 if ((symbolP
= symbol_find (name
)) == NULL
3154 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3157 /* When doing symbol listings, play games with dummy fragments living
3158 outside the normal fragment chain to record the file and line info
3160 if (listing
& LISTING_SYMBOLS
)
3162 extern struct list_info_struct
* listing_tail
;
3163 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3165 memset (dummy_frag
, 0, sizeof (fragS
));
3166 dummy_frag
->fr_type
= rs_fill
;
3167 dummy_frag
->line
= listing_tail
;
3168 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3169 dummy_frag
->fr_symbol
= symbolP
;
3173 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3176 /* "set" symbols are local unless otherwise specified. */
3177 SF_SET_LOCAL (symbolP
);
3178 #endif /* OBJ_COFF */
3179 } /* Make a new symbol. */
3181 symbol_table_insert (symbolP
);
3186 && S_IS_DEFINED (symbolP
)
3187 && S_GET_SEGMENT (symbolP
) != reg_section
)
3188 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3190 pseudo_set (symbolP
);
3192 demand_empty_rest_of_line ();
3194 /* XXX Now we come to the Thumb specific bit of code. */
3196 THUMB_SET_FUNC (symbolP
, 1);
3197 ARM_SET_THUMB (symbolP
, 1);
3198 #if defined OBJ_ELF || defined OBJ_COFF
3199 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3203 /* Directives: Mode selection. */
3205 /* .syntax [unified|divided] - choose the new unified syntax
3206 (same for Arm and Thumb encoding, modulo slight differences in what
3207 can be represented) or the old divergent syntax for each mode. */
3209 s_syntax (int unused ATTRIBUTE_UNUSED
)
3213 delim
= get_symbol_name (& name
);
3215 if (!strcasecmp (name
, "unified"))
3216 unified_syntax
= TRUE
;
3217 else if (!strcasecmp (name
, "divided"))
3218 unified_syntax
= FALSE
;
3221 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3224 (void) restore_line_pointer (delim
);
3225 demand_empty_rest_of_line ();
3228 /* Directives: sectioning and alignment. */
3231 s_bss (int ignore ATTRIBUTE_UNUSED
)
3233 /* We don't support putting frags in the BSS segment, we fake it by
3234 marking in_bss, then looking at s_skip for clues. */
3235 subseg_set (bss_section
, 0);
3236 demand_empty_rest_of_line ();
3238 #ifdef md_elf_section_change_hook
3239 md_elf_section_change_hook ();
3244 s_even (int ignore ATTRIBUTE_UNUSED
)
3246 /* Never make frag if expect extra pass. */
3248 frag_align (1, 0, 0);
3250 record_alignment (now_seg
, 1);
3252 demand_empty_rest_of_line ();
3255 /* Directives: CodeComposer Studio. */
3257 /* .ref (for CodeComposer Studio syntax only). */
3259 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3261 if (codecomposer_syntax
)
3262 ignore_rest_of_line ();
3264 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3267 /* If name is not NULL, then it is used for marking the beginning of a
3268 function, whereas if it is NULL then it means the function end. */
3270 asmfunc_debug (const char * name
)
3272 static const char * last_name
= NULL
;
3276 gas_assert (last_name
== NULL
);
3279 if (debug_type
== DEBUG_STABS
)
3280 stabs_generate_asm_func (name
, name
);
3284 gas_assert (last_name
!= NULL
);
3286 if (debug_type
== DEBUG_STABS
)
3287 stabs_generate_asm_endfunc (last_name
, last_name
);
3294 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3296 if (codecomposer_syntax
)
3298 switch (asmfunc_state
)
3300 case OUTSIDE_ASMFUNC
:
3301 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3304 case WAITING_ASMFUNC_NAME
:
3305 as_bad (_(".asmfunc repeated."));
3308 case WAITING_ENDASMFUNC
:
3309 as_bad (_(".asmfunc without function."));
3312 demand_empty_rest_of_line ();
3315 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3319 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3321 if (codecomposer_syntax
)
3323 switch (asmfunc_state
)
3325 case OUTSIDE_ASMFUNC
:
3326 as_bad (_(".endasmfunc without a .asmfunc."));
3329 case WAITING_ASMFUNC_NAME
:
3330 as_bad (_(".endasmfunc without function."));
3333 case WAITING_ENDASMFUNC
:
3334 asmfunc_state
= OUTSIDE_ASMFUNC
;
3335 asmfunc_debug (NULL
);
3338 demand_empty_rest_of_line ();
3341 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3345 s_ccs_def (int name
)
3347 if (codecomposer_syntax
)
3350 as_bad (_(".def pseudo-op only available with -mccs flag."));
3353 /* Directives: Literal pools. */
3355 static literal_pool
*
3356 find_literal_pool (void)
3358 literal_pool
* pool
;
3360 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3362 if (pool
->section
== now_seg
3363 && pool
->sub_section
== now_subseg
)
3370 static literal_pool
*
3371 find_or_make_literal_pool (void)
3373 /* Next literal pool ID number. */
3374 static unsigned int latest_pool_num
= 1;
3375 literal_pool
* pool
;
3377 pool
= find_literal_pool ();
3381 /* Create a new pool. */
3382 pool
= XNEW (literal_pool
);
3386 pool
->next_free_entry
= 0;
3387 pool
->section
= now_seg
;
3388 pool
->sub_section
= now_subseg
;
3389 pool
->next
= list_of_pools
;
3390 pool
->symbol
= NULL
;
3391 pool
->alignment
= 2;
3393 /* Add it to the list. */
3394 list_of_pools
= pool
;
3397 /* New pools, and emptied pools, will have a NULL symbol. */
3398 if (pool
->symbol
== NULL
)
3400 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3401 (valueT
) 0, &zero_address_frag
);
3402 pool
->id
= latest_pool_num
++;
3409 /* Add the literal in the global 'inst'
3410 structure to the relevant literal pool. */
3413 add_to_lit_pool (unsigned int nbytes
)
3415 #define PADDING_SLOT 0x1
3416 #define LIT_ENTRY_SIZE_MASK 0xFF
3417 literal_pool
* pool
;
3418 unsigned int entry
, pool_size
= 0;
3419 bfd_boolean padding_slot_p
= FALSE
;
3425 imm1
= inst
.operands
[1].imm
;
3426 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3427 : inst
.relocs
[0].exp
.X_unsigned
? 0
3428 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3429 if (target_big_endian
)
3432 imm2
= inst
.operands
[1].imm
;
3436 pool
= find_or_make_literal_pool ();
3438 /* Check if this literal value is already in the pool. */
3439 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3443 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3444 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3445 && (pool
->literals
[entry
].X_add_number
3446 == inst
.relocs
[0].exp
.X_add_number
)
3447 && (pool
->literals
[entry
].X_md
== nbytes
)
3448 && (pool
->literals
[entry
].X_unsigned
3449 == inst
.relocs
[0].exp
.X_unsigned
))
3452 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3453 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3454 && (pool
->literals
[entry
].X_add_number
3455 == inst
.relocs
[0].exp
.X_add_number
)
3456 && (pool
->literals
[entry
].X_add_symbol
3457 == inst
.relocs
[0].exp
.X_add_symbol
)
3458 && (pool
->literals
[entry
].X_op_symbol
3459 == inst
.relocs
[0].exp
.X_op_symbol
)
3460 && (pool
->literals
[entry
].X_md
== nbytes
))
3463 else if ((nbytes
== 8)
3464 && !(pool_size
& 0x7)
3465 && ((entry
+ 1) != pool
->next_free_entry
)
3466 && (pool
->literals
[entry
].X_op
== O_constant
)
3467 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3468 && (pool
->literals
[entry
].X_unsigned
3469 == inst
.relocs
[0].exp
.X_unsigned
)
3470 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3471 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3472 && (pool
->literals
[entry
+ 1].X_unsigned
3473 == inst
.relocs
[0].exp
.X_unsigned
))
3476 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3477 if (padding_slot_p
&& (nbytes
== 4))
3483 /* Do we need to create a new entry? */
3484 if (entry
== pool
->next_free_entry
)
3486 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3488 inst
.error
= _("literal pool overflow");
3494 /* For 8-byte entries, we align to an 8-byte boundary,
3495 and split it into two 4-byte entries, because on 32-bit
3496 host, 8-byte constants are treated as big num, thus
3497 saved in "generic_bignum" which will be overwritten
3498 by later assignments.
3500 We also need to make sure there is enough space for
3503 We also check to make sure the literal operand is a
3505 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3506 || inst
.relocs
[0].exp
.X_op
== O_big
))
3508 inst
.error
= _("invalid type for literal pool");
3511 else if (pool_size
& 0x7)
3513 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3515 inst
.error
= _("literal pool overflow");
3519 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3520 pool
->literals
[entry
].X_op
= O_constant
;
3521 pool
->literals
[entry
].X_add_number
= 0;
3522 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3523 pool
->next_free_entry
+= 1;
3526 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3528 inst
.error
= _("literal pool overflow");
3532 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3533 pool
->literals
[entry
].X_op
= O_constant
;
3534 pool
->literals
[entry
].X_add_number
= imm1
;
3535 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3536 pool
->literals
[entry
++].X_md
= 4;
3537 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3538 pool
->literals
[entry
].X_op
= O_constant
;
3539 pool
->literals
[entry
].X_add_number
= imm2
;
3540 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3541 pool
->literals
[entry
].X_md
= 4;
3542 pool
->alignment
= 3;
3543 pool
->next_free_entry
+= 1;
3547 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3548 pool
->literals
[entry
].X_md
= 4;
3552 /* PR ld/12974: Record the location of the first source line to reference
3553 this entry in the literal pool. If it turns out during linking that the
3554 symbol does not exist we will be able to give an accurate line number for
3555 the (first use of the) missing reference. */
3556 if (debug_type
== DEBUG_DWARF2
)
3557 dwarf2_where (pool
->locs
+ entry
);
3559 pool
->next_free_entry
+= 1;
3561 else if (padding_slot_p
)
3563 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3564 pool
->literals
[entry
].X_md
= nbytes
;
3567 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3568 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3569 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3575 tc_start_label_without_colon (void)
3577 bfd_boolean ret
= TRUE
;
3579 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3581 const char *label
= input_line_pointer
;
3583 while (!is_end_of_line
[(int) label
[-1]])
3588 as_bad (_("Invalid label '%s'"), label
);
3592 asmfunc_debug (label
);
3594 asmfunc_state
= WAITING_ENDASMFUNC
;
3600 /* Can't use symbol_new here, so have to create a symbol and then at
3601 a later date assign it a value. That's what these functions do. */
3604 symbol_locate (symbolS
* symbolP
,
3605 const char * name
, /* It is copied, the caller can modify. */
3606 segT segment
, /* Segment identifier (SEG_<something>). */
3607 valueT valu
, /* Symbol value. */
3608 fragS
* frag
) /* Associated fragment. */
3611 char * preserved_copy_of_name
;
3613 name_length
= strlen (name
) + 1; /* +1 for \0. */
3614 obstack_grow (¬es
, name
, name_length
);
3615 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3617 #ifdef tc_canonicalize_symbol_name
3618 preserved_copy_of_name
=
3619 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3622 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3624 S_SET_SEGMENT (symbolP
, segment
);
3625 S_SET_VALUE (symbolP
, valu
);
3626 symbol_clear_list_pointers (symbolP
);
3628 symbol_set_frag (symbolP
, frag
);
3630 /* Link to end of symbol chain. */
3632 extern int symbol_table_frozen
;
3634 if (symbol_table_frozen
)
3638 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3640 obj_symbol_new_hook (symbolP
);
3642 #ifdef tc_symbol_new_hook
3643 tc_symbol_new_hook (symbolP
);
3647 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3648 #endif /* DEBUG_SYMS */
3652 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3655 literal_pool
* pool
;
3658 pool
= find_literal_pool ();
3660 || pool
->symbol
== NULL
3661 || pool
->next_free_entry
== 0)
3664 /* Align pool as you have word accesses.
3665 Only make a frag if we have to. */
3667 frag_align (pool
->alignment
, 0, 0);
3669 record_alignment (now_seg
, 2);
3672 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3673 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3675 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3677 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3678 (valueT
) frag_now_fix (), frag_now
);
3679 symbol_table_insert (pool
->symbol
);
3681 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3683 #if defined OBJ_COFF || defined OBJ_ELF
3684 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3687 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3690 if (debug_type
== DEBUG_DWARF2
)
3691 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3693 /* First output the expression in the instruction to the pool. */
3694 emit_expr (&(pool
->literals
[entry
]),
3695 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3698 /* Mark the pool as empty. */
3699 pool
->next_free_entry
= 0;
3700 pool
->symbol
= NULL
;
3704 /* Forward declarations for functions below, in the MD interface
3706 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3707 static valueT
create_unwind_entry (int);
3708 static void start_unwind_section (const segT
, int);
3709 static void add_unwind_opcode (valueT
, int);
3710 static void flush_pending_unwind (void);
3712 /* Directives: Data. */
3715 s_arm_elf_cons (int nbytes
)
3719 #ifdef md_flush_pending_output
3720 md_flush_pending_output ();
3723 if (is_it_end_of_statement ())
3725 demand_empty_rest_of_line ();
3729 #ifdef md_cons_align
3730 md_cons_align (nbytes
);
3733 mapping_state (MAP_DATA
);
3737 char *base
= input_line_pointer
;
3741 if (exp
.X_op
!= O_symbol
)
3742 emit_expr (&exp
, (unsigned int) nbytes
);
3745 char *before_reloc
= input_line_pointer
;
3746 reloc
= parse_reloc (&input_line_pointer
);
3749 as_bad (_("unrecognized relocation suffix"));
3750 ignore_rest_of_line ();
3753 else if (reloc
== BFD_RELOC_UNUSED
)
3754 emit_expr (&exp
, (unsigned int) nbytes
);
3757 reloc_howto_type
*howto
= (reloc_howto_type
*)
3758 bfd_reloc_type_lookup (stdoutput
,
3759 (bfd_reloc_code_real_type
) reloc
);
3760 int size
= bfd_get_reloc_size (howto
);
3762 if (reloc
== BFD_RELOC_ARM_PLT32
)
3764 as_bad (_("(plt) is only valid on branch targets"));
3765 reloc
= BFD_RELOC_UNUSED
;
3770 as_bad (ngettext ("%s relocations do not fit in %d byte",
3771 "%s relocations do not fit in %d bytes",
3773 howto
->name
, nbytes
);
3776 /* We've parsed an expression stopping at O_symbol.
3777 But there may be more expression left now that we
3778 have parsed the relocation marker. Parse it again.
3779 XXX Surely there is a cleaner way to do this. */
3780 char *p
= input_line_pointer
;
3782 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3784 memcpy (save_buf
, base
, input_line_pointer
- base
);
3785 memmove (base
+ (input_line_pointer
- before_reloc
),
3786 base
, before_reloc
- base
);
3788 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3790 memcpy (base
, save_buf
, p
- base
);
3792 offset
= nbytes
- size
;
3793 p
= frag_more (nbytes
);
3794 memset (p
, 0, nbytes
);
3795 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3796 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3802 while (*input_line_pointer
++ == ',');
3804 /* Put terminator back into stream. */
3805 input_line_pointer
--;
3806 demand_empty_rest_of_line ();
3809 /* Emit an expression containing a 32-bit thumb instruction.
3810 Implementation based on put_thumb32_insn. */
3813 emit_thumb32_expr (expressionS
* exp
)
3815 expressionS exp_high
= *exp
;
3817 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3818 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3819 exp
->X_add_number
&= 0xffff;
3820 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3823 /* Guess the instruction size based on the opcode. */
3826 thumb_insn_size (int opcode
)
3828 if ((unsigned int) opcode
< 0xe800u
)
3830 else if ((unsigned int) opcode
>= 0xe8000000u
)
3837 emit_insn (expressionS
*exp
, int nbytes
)
3841 if (exp
->X_op
== O_constant
)
3846 size
= thumb_insn_size (exp
->X_add_number
);
3850 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3852 as_bad (_(".inst.n operand too big. "\
3853 "Use .inst.w instead"));
3858 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
3859 set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN
, 0);
3861 set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3863 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3864 emit_thumb32_expr (exp
);
3866 emit_expr (exp
, (unsigned int) size
);
3868 it_fsm_post_encode ();
3872 as_bad (_("cannot determine Thumb instruction size. " \
3873 "Use .inst.n/.inst.w instead"));
3876 as_bad (_("constant expression required"));
3881 /* Like s_arm_elf_cons but do not use md_cons_align and
3882 set the mapping state to MAP_ARM/MAP_THUMB. */
3885 s_arm_elf_inst (int nbytes
)
3887 if (is_it_end_of_statement ())
3889 demand_empty_rest_of_line ();
3893 /* Calling mapping_state () here will not change ARM/THUMB,
3894 but will ensure not to be in DATA state. */
3897 mapping_state (MAP_THUMB
);
3902 as_bad (_("width suffixes are invalid in ARM mode"));
3903 ignore_rest_of_line ();
3909 mapping_state (MAP_ARM
);
3918 if (! emit_insn (& exp
, nbytes
))
3920 ignore_rest_of_line ();
3924 while (*input_line_pointer
++ == ',');
3926 /* Put terminator back into stream. */
3927 input_line_pointer
--;
3928 demand_empty_rest_of_line ();
3931 /* Parse a .rel31 directive. */
3934 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3941 if (*input_line_pointer
== '1')
3942 highbit
= 0x80000000;
3943 else if (*input_line_pointer
!= '0')
3944 as_bad (_("expected 0 or 1"));
3946 input_line_pointer
++;
3947 if (*input_line_pointer
!= ',')
3948 as_bad (_("missing comma"));
3949 input_line_pointer
++;
3951 #ifdef md_flush_pending_output
3952 md_flush_pending_output ();
3955 #ifdef md_cons_align
3959 mapping_state (MAP_DATA
);
3964 md_number_to_chars (p
, highbit
, 4);
3965 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3966 BFD_RELOC_ARM_PREL31
);
3968 demand_empty_rest_of_line ();
3971 /* Directives: AEABI stack-unwind tables. */
3973 /* Parse an unwind_fnstart directive. Simply records the current location. */
3976 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3978 demand_empty_rest_of_line ();
3979 if (unwind
.proc_start
)
3981 as_bad (_("duplicate .fnstart directive"));
3985 /* Mark the start of the function. */
3986 unwind
.proc_start
= expr_build_dot ();
3988 /* Reset the rest of the unwind info. */
3989 unwind
.opcode_count
= 0;
3990 unwind
.table_entry
= NULL
;
3991 unwind
.personality_routine
= NULL
;
3992 unwind
.personality_index
= -1;
3993 unwind
.frame_size
= 0;
3994 unwind
.fp_offset
= 0;
3995 unwind
.fp_reg
= REG_SP
;
3997 unwind
.sp_restored
= 0;
4001 /* Parse a handlerdata directive. Creates the exception handling table entry
4002 for the function. */
4005 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
4007 demand_empty_rest_of_line ();
4008 if (!unwind
.proc_start
)
4009 as_bad (MISSING_FNSTART
);
4011 if (unwind
.table_entry
)
4012 as_bad (_("duplicate .handlerdata directive"));
4014 create_unwind_entry (1);
4017 /* Parse an unwind_fnend directive. Generates the index table entry. */
4020 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
4025 unsigned int marked_pr_dependency
;
4027 demand_empty_rest_of_line ();
4029 if (!unwind
.proc_start
)
4031 as_bad (_(".fnend directive without .fnstart"));
4035 /* Add eh table entry. */
4036 if (unwind
.table_entry
== NULL
)
4037 val
= create_unwind_entry (0);
4041 /* Add index table entry. This is two words. */
4042 start_unwind_section (unwind
.saved_seg
, 1);
4043 frag_align (2, 0, 0);
4044 record_alignment (now_seg
, 2);
4046 ptr
= frag_more (8);
4048 where
= frag_now_fix () - 8;
4050 /* Self relative offset of the function start. */
4051 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
4052 BFD_RELOC_ARM_PREL31
);
4054 /* Indicate dependency on EHABI-defined personality routines to the
4055 linker, if it hasn't been done already. */
4056 marked_pr_dependency
4057 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
4058 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
4059 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
4061 static const char *const name
[] =
4063 "__aeabi_unwind_cpp_pr0",
4064 "__aeabi_unwind_cpp_pr1",
4065 "__aeabi_unwind_cpp_pr2"
4067 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
4068 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
4069 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
4070 |= 1 << unwind
.personality_index
;
4074 /* Inline exception table entry. */
4075 md_number_to_chars (ptr
+ 4, val
, 4);
4077 /* Self relative offset of the table entry. */
4078 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
4079 BFD_RELOC_ARM_PREL31
);
4081 /* Restore the original section. */
4082 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
4084 unwind
.proc_start
= NULL
;
4088 /* Parse an unwind_cantunwind directive. */
4091 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4093 demand_empty_rest_of_line ();
4094 if (!unwind
.proc_start
)
4095 as_bad (MISSING_FNSTART
);
4097 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4098 as_bad (_("personality routine specified for cantunwind frame"));
4100 unwind
.personality_index
= -2;
4104 /* Parse a personalityindex directive. */
4107 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4111 if (!unwind
.proc_start
)
4112 as_bad (MISSING_FNSTART
);
4114 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4115 as_bad (_("duplicate .personalityindex directive"));
4119 if (exp
.X_op
!= O_constant
4120 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4122 as_bad (_("bad personality routine number"));
4123 ignore_rest_of_line ();
4127 unwind
.personality_index
= exp
.X_add_number
;
4129 demand_empty_rest_of_line ();
4133 /* Parse a personality directive. */
4136 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4140 if (!unwind
.proc_start
)
4141 as_bad (MISSING_FNSTART
);
4143 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4144 as_bad (_("duplicate .personality directive"));
4146 c
= get_symbol_name (& name
);
4147 p
= input_line_pointer
;
4149 ++ input_line_pointer
;
4150 unwind
.personality_routine
= symbol_find_or_make (name
);
4152 demand_empty_rest_of_line ();
4156 /* Parse a directive saving core registers. */
4159 s_arm_unwind_save_core (void)
4165 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4168 as_bad (_("expected register list"));
4169 ignore_rest_of_line ();
4173 demand_empty_rest_of_line ();
4175 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4176 into .unwind_save {..., sp...}. We aren't bothered about the value of
4177 ip because it is clobbered by calls. */
4178 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4179 && (range
& 0x3000) == 0x1000)
4181 unwind
.opcode_count
--;
4182 unwind
.sp_restored
= 0;
4183 range
= (range
| 0x2000) & ~0x1000;
4184 unwind
.pending_offset
= 0;
4190 /* See if we can use the short opcodes. These pop a block of up to 8
4191 registers starting with r4, plus maybe r14. */
4192 for (n
= 0; n
< 8; n
++)
4194 /* Break at the first non-saved register. */
4195 if ((range
& (1 << (n
+ 4))) == 0)
4198 /* See if there are any other bits set. */
4199 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4201 /* Use the long form. */
4202 op
= 0x8000 | ((range
>> 4) & 0xfff);
4203 add_unwind_opcode (op
, 2);
4207 /* Use the short form. */
4209 op
= 0xa8; /* Pop r14. */
4211 op
= 0xa0; /* Do not pop r14. */
4213 add_unwind_opcode (op
, 1);
4220 op
= 0xb100 | (range
& 0xf);
4221 add_unwind_opcode (op
, 2);
4224 /* Record the number of bytes pushed. */
4225 for (n
= 0; n
< 16; n
++)
4227 if (range
& (1 << n
))
4228 unwind
.frame_size
+= 4;
4233 /* Parse a directive saving FPA registers. */
4236 s_arm_unwind_save_fpa (int reg
)
4242 /* Get Number of registers to transfer. */
4243 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4246 exp
.X_op
= O_illegal
;
4248 if (exp
.X_op
!= O_constant
)
4250 as_bad (_("expected , <constant>"));
4251 ignore_rest_of_line ();
4255 num_regs
= exp
.X_add_number
;
4257 if (num_regs
< 1 || num_regs
> 4)
4259 as_bad (_("number of registers must be in the range [1:4]"));
4260 ignore_rest_of_line ();
4264 demand_empty_rest_of_line ();
4269 op
= 0xb4 | (num_regs
- 1);
4270 add_unwind_opcode (op
, 1);
4275 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4276 add_unwind_opcode (op
, 2);
4278 unwind
.frame_size
+= num_regs
* 12;
4282 /* Parse a directive saving VFP registers for ARMv6 and above. */
4285 s_arm_unwind_save_vfp_armv6 (void)
4290 int num_vfpv3_regs
= 0;
4291 int num_regs_below_16
;
4292 bfd_boolean partial_match
;
4294 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4298 as_bad (_("expected register list"));
4299 ignore_rest_of_line ();
4303 demand_empty_rest_of_line ();
4305 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4306 than FSTMX/FLDMX-style ones). */
4308 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4310 num_vfpv3_regs
= count
;
4311 else if (start
+ count
> 16)
4312 num_vfpv3_regs
= start
+ count
- 16;
4314 if (num_vfpv3_regs
> 0)
4316 int start_offset
= start
> 16 ? start
- 16 : 0;
4317 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4318 add_unwind_opcode (op
, 2);
4321 /* Generate opcode for registers numbered in the range 0 .. 15. */
4322 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4323 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4324 if (num_regs_below_16
> 0)
4326 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4327 add_unwind_opcode (op
, 2);
4330 unwind
.frame_size
+= count
* 8;
4334 /* Parse a directive saving VFP registers for pre-ARMv6. */
4337 s_arm_unwind_save_vfp (void)
4342 bfd_boolean partial_match
;
4344 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4348 as_bad (_("expected register list"));
4349 ignore_rest_of_line ();
4353 demand_empty_rest_of_line ();
4358 op
= 0xb8 | (count
- 1);
4359 add_unwind_opcode (op
, 1);
4364 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4365 add_unwind_opcode (op
, 2);
4367 unwind
.frame_size
+= count
* 8 + 4;
4371 /* Parse a directive saving iWMMXt data registers. */
4374 s_arm_unwind_save_mmxwr (void)
4382 if (*input_line_pointer
== '{')
4383 input_line_pointer
++;
4387 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4391 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4396 as_tsktsk (_("register list not in ascending order"));
4399 if (*input_line_pointer
== '-')
4401 input_line_pointer
++;
4402 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4405 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4408 else if (reg
>= hi_reg
)
4410 as_bad (_("bad register range"));
4413 for (; reg
< hi_reg
; reg
++)
4417 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4419 skip_past_char (&input_line_pointer
, '}');
4421 demand_empty_rest_of_line ();
4423 /* Generate any deferred opcodes because we're going to be looking at
4425 flush_pending_unwind ();
4427 for (i
= 0; i
< 16; i
++)
4429 if (mask
& (1 << i
))
4430 unwind
.frame_size
+= 8;
4433 /* Attempt to combine with a previous opcode. We do this because gcc
4434 likes to output separate unwind directives for a single block of
4436 if (unwind
.opcode_count
> 0)
4438 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4439 if ((i
& 0xf8) == 0xc0)
4442 /* Only merge if the blocks are contiguous. */
4445 if ((mask
& 0xfe00) == (1 << 9))
4447 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4448 unwind
.opcode_count
--;
4451 else if (i
== 6 && unwind
.opcode_count
>= 2)
4453 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4457 op
= 0xffff << (reg
- 1);
4459 && ((mask
& op
) == (1u << (reg
- 1))))
4461 op
= (1 << (reg
+ i
+ 1)) - 1;
4462 op
&= ~((1 << reg
) - 1);
4464 unwind
.opcode_count
-= 2;
4471 /* We want to generate opcodes in the order the registers have been
4472 saved, ie. descending order. */
4473 for (reg
= 15; reg
>= -1; reg
--)
4475 /* Save registers in blocks. */
4477 || !(mask
& (1 << reg
)))
4479 /* We found an unsaved reg. Generate opcodes to save the
4486 op
= 0xc0 | (hi_reg
- 10);
4487 add_unwind_opcode (op
, 1);
4492 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4493 add_unwind_opcode (op
, 2);
4502 ignore_rest_of_line ();
4506 s_arm_unwind_save_mmxwcg (void)
4513 if (*input_line_pointer
== '{')
4514 input_line_pointer
++;
4516 skip_whitespace (input_line_pointer
);
4520 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4524 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4530 as_tsktsk (_("register list not in ascending order"));
4533 if (*input_line_pointer
== '-')
4535 input_line_pointer
++;
4536 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4539 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4542 else if (reg
>= hi_reg
)
4544 as_bad (_("bad register range"));
4547 for (; reg
< hi_reg
; reg
++)
4551 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4553 skip_past_char (&input_line_pointer
, '}');
4555 demand_empty_rest_of_line ();
4557 /* Generate any deferred opcodes because we're going to be looking at
4559 flush_pending_unwind ();
4561 for (reg
= 0; reg
< 16; reg
++)
4563 if (mask
& (1 << reg
))
4564 unwind
.frame_size
+= 4;
4567 add_unwind_opcode (op
, 2);
4570 ignore_rest_of_line ();
4574 /* Parse an unwind_save directive.
4575 If the argument is non-zero, this is a .vsave directive. */
4578 s_arm_unwind_save (int arch_v6
)
4581 struct reg_entry
*reg
;
4582 bfd_boolean had_brace
= FALSE
;
4584 if (!unwind
.proc_start
)
4585 as_bad (MISSING_FNSTART
);
4587 /* Figure out what sort of save we have. */
4588 peek
= input_line_pointer
;
4596 reg
= arm_reg_parse_multi (&peek
);
4600 as_bad (_("register expected"));
4601 ignore_rest_of_line ();
4610 as_bad (_("FPA .unwind_save does not take a register list"));
4611 ignore_rest_of_line ();
4614 input_line_pointer
= peek
;
4615 s_arm_unwind_save_fpa (reg
->number
);
4619 s_arm_unwind_save_core ();
4624 s_arm_unwind_save_vfp_armv6 ();
4626 s_arm_unwind_save_vfp ();
4629 case REG_TYPE_MMXWR
:
4630 s_arm_unwind_save_mmxwr ();
4633 case REG_TYPE_MMXWCG
:
4634 s_arm_unwind_save_mmxwcg ();
4638 as_bad (_(".unwind_save does not support this kind of register"));
4639 ignore_rest_of_line ();
4644 /* Parse an unwind_movsp directive. */
4647 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4653 if (!unwind
.proc_start
)
4654 as_bad (MISSING_FNSTART
);
4656 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4659 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4660 ignore_rest_of_line ();
4664 /* Optional constant. */
4665 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4667 if (immediate_for_directive (&offset
) == FAIL
)
4673 demand_empty_rest_of_line ();
4675 if (reg
== REG_SP
|| reg
== REG_PC
)
4677 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4681 if (unwind
.fp_reg
!= REG_SP
)
4682 as_bad (_("unexpected .unwind_movsp directive"));
4684 /* Generate opcode to restore the value. */
4686 add_unwind_opcode (op
, 1);
4688 /* Record the information for later. */
4689 unwind
.fp_reg
= reg
;
4690 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4691 unwind
.sp_restored
= 1;
4694 /* Parse an unwind_pad directive. */
4697 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4701 if (!unwind
.proc_start
)
4702 as_bad (MISSING_FNSTART
);
4704 if (immediate_for_directive (&offset
) == FAIL
)
4709 as_bad (_("stack increment must be multiple of 4"));
4710 ignore_rest_of_line ();
4714 /* Don't generate any opcodes, just record the details for later. */
4715 unwind
.frame_size
+= offset
;
4716 unwind
.pending_offset
+= offset
;
4718 demand_empty_rest_of_line ();
4721 /* Parse an unwind_setfp directive. */
4724 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4730 if (!unwind
.proc_start
)
4731 as_bad (MISSING_FNSTART
);
4733 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4734 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4737 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4739 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4741 as_bad (_("expected <reg>, <reg>"));
4742 ignore_rest_of_line ();
4746 /* Optional constant. */
4747 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4749 if (immediate_for_directive (&offset
) == FAIL
)
4755 demand_empty_rest_of_line ();
4757 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4759 as_bad (_("register must be either sp or set by a previous"
4760 "unwind_movsp directive"));
4764 /* Don't generate any opcodes, just record the information for later. */
4765 unwind
.fp_reg
= fp_reg
;
4767 if (sp_reg
== REG_SP
)
4768 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4770 unwind
.fp_offset
-= offset
;
4773 /* Parse an unwind_raw directive. */
4776 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4779 /* This is an arbitrary limit. */
4780 unsigned char op
[16];
4783 if (!unwind
.proc_start
)
4784 as_bad (MISSING_FNSTART
);
4787 if (exp
.X_op
== O_constant
4788 && skip_past_comma (&input_line_pointer
) != FAIL
)
4790 unwind
.frame_size
+= exp
.X_add_number
;
4794 exp
.X_op
= O_illegal
;
4796 if (exp
.X_op
!= O_constant
)
4798 as_bad (_("expected <offset>, <opcode>"));
4799 ignore_rest_of_line ();
4805 /* Parse the opcode. */
4810 as_bad (_("unwind opcode too long"));
4811 ignore_rest_of_line ();
4813 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4815 as_bad (_("invalid unwind opcode"));
4816 ignore_rest_of_line ();
4819 op
[count
++] = exp
.X_add_number
;
4821 /* Parse the next byte. */
4822 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4828 /* Add the opcode bytes in reverse order. */
4830 add_unwind_opcode (op
[count
], 1);
4832 demand_empty_rest_of_line ();
4836 /* Parse a .eabi_attribute directive. */
4839 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4841 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4843 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4844 attributes_set_explicitly
[tag
] = 1;
4847 /* Emit a tls fix for the symbol. */
4850 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4854 #ifdef md_flush_pending_output
4855 md_flush_pending_output ();
4858 #ifdef md_cons_align
4862 /* Since we're just labelling the code, there's no need to define a
4865 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4866 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4867 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4868 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4870 #endif /* OBJ_ELF */
4872 static void s_arm_arch (int);
4873 static void s_arm_object_arch (int);
4874 static void s_arm_cpu (int);
4875 static void s_arm_fpu (int);
4876 static void s_arm_arch_extension (int);
4881 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4888 if (exp
.X_op
== O_symbol
)
4889 exp
.X_op
= O_secrel
;
4891 emit_expr (&exp
, 4);
4893 while (*input_line_pointer
++ == ',');
4895 input_line_pointer
--;
4896 demand_empty_rest_of_line ();
4900 /* This table describes all the machine specific pseudo-ops the assembler
4901 has to support. The fields are:
4902 pseudo-op name without dot
4903 function to call to execute this pseudo-op
4904 Integer arg to pass to the function. */
4906 const pseudo_typeS md_pseudo_table
[] =
4908 /* Never called because '.req' does not start a line. */
4909 { "req", s_req
, 0 },
4910 /* Following two are likewise never called. */
4913 { "unreq", s_unreq
, 0 },
4914 { "bss", s_bss
, 0 },
4915 { "align", s_align_ptwo
, 2 },
4916 { "arm", s_arm
, 0 },
4917 { "thumb", s_thumb
, 0 },
4918 { "code", s_code
, 0 },
4919 { "force_thumb", s_force_thumb
, 0 },
4920 { "thumb_func", s_thumb_func
, 0 },
4921 { "thumb_set", s_thumb_set
, 0 },
4922 { "even", s_even
, 0 },
4923 { "ltorg", s_ltorg
, 0 },
4924 { "pool", s_ltorg
, 0 },
4925 { "syntax", s_syntax
, 0 },
4926 { "cpu", s_arm_cpu
, 0 },
4927 { "arch", s_arm_arch
, 0 },
4928 { "object_arch", s_arm_object_arch
, 0 },
4929 { "fpu", s_arm_fpu
, 0 },
4930 { "arch_extension", s_arm_arch_extension
, 0 },
4932 { "word", s_arm_elf_cons
, 4 },
4933 { "long", s_arm_elf_cons
, 4 },
4934 { "inst.n", s_arm_elf_inst
, 2 },
4935 { "inst.w", s_arm_elf_inst
, 4 },
4936 { "inst", s_arm_elf_inst
, 0 },
4937 { "rel31", s_arm_rel31
, 0 },
4938 { "fnstart", s_arm_unwind_fnstart
, 0 },
4939 { "fnend", s_arm_unwind_fnend
, 0 },
4940 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4941 { "personality", s_arm_unwind_personality
, 0 },
4942 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4943 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4944 { "save", s_arm_unwind_save
, 0 },
4945 { "vsave", s_arm_unwind_save
, 1 },
4946 { "movsp", s_arm_unwind_movsp
, 0 },
4947 { "pad", s_arm_unwind_pad
, 0 },
4948 { "setfp", s_arm_unwind_setfp
, 0 },
4949 { "unwind_raw", s_arm_unwind_raw
, 0 },
4950 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4951 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4955 /* These are used for dwarf. */
4959 /* These are used for dwarf2. */
4960 { "file", dwarf2_directive_file
, 0 },
4961 { "loc", dwarf2_directive_loc
, 0 },
4962 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4964 { "extend", float_cons
, 'x' },
4965 { "ldouble", float_cons
, 'x' },
4966 { "packed", float_cons
, 'p' },
4968 {"secrel32", pe_directive_secrel
, 0},
4971 /* These are for compatibility with CodeComposer Studio. */
4972 {"ref", s_ccs_ref
, 0},
4973 {"def", s_ccs_def
, 0},
4974 {"asmfunc", s_ccs_asmfunc
, 0},
4975 {"endasmfunc", s_ccs_endasmfunc
, 0},
4980 /* Parser functions used exclusively in instruction operands. */
4982 /* Generic immediate-value read function for use in insn parsing.
4983 STR points to the beginning of the immediate (the leading #);
4984 VAL receives the value; if the value is outside [MIN, MAX]
4985 issue an error. PREFIX_OPT is true if the immediate prefix is
4989 parse_immediate (char **str
, int *val
, int min
, int max
,
4990 bfd_boolean prefix_opt
)
4994 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4995 if (exp
.X_op
!= O_constant
)
4997 inst
.error
= _("constant expression required");
5001 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
5003 inst
.error
= _("immediate value out of range");
5007 *val
= exp
.X_add_number
;
5011 /* Less-generic immediate-value read function with the possibility of loading a
5012 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5013 instructions. Puts the result directly in inst.operands[i]. */
5016 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
5017 bfd_boolean allow_symbol_p
)
5020 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
5023 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
5025 if (exp_p
->X_op
== O_constant
)
5027 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
5028 /* If we're on a 64-bit host, then a 64-bit number can be returned using
5029 O_constant. We have to be careful not to break compilation for
5030 32-bit X_add_number, though. */
5031 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
5033 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
5034 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
5036 inst
.operands
[i
].regisimm
= 1;
5039 else if (exp_p
->X_op
== O_big
5040 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
5042 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
5044 /* Bignums have their least significant bits in
5045 generic_bignum[0]. Make sure we put 32 bits in imm and
5046 32 bits in reg, in a (hopefully) portable way. */
5047 gas_assert (parts
!= 0);
5049 /* Make sure that the number is not too big.
5050 PR 11972: Bignums can now be sign-extended to the
5051 size of a .octa so check that the out of range bits
5052 are all zero or all one. */
5053 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
5055 LITTLENUM_TYPE m
= -1;
5057 if (generic_bignum
[parts
* 2] != 0
5058 && generic_bignum
[parts
* 2] != m
)
5061 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
5062 if (generic_bignum
[j
] != generic_bignum
[j
-1])
5066 inst
.operands
[i
].imm
= 0;
5067 for (j
= 0; j
< parts
; j
++, idx
++)
5068 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
5069 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5070 inst
.operands
[i
].reg
= 0;
5071 for (j
= 0; j
< parts
; j
++, idx
++)
5072 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
5073 << (LITTLENUM_NUMBER_OF_BITS
* j
);
5074 inst
.operands
[i
].regisimm
= 1;
5076 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
5084 /* Returns the pseudo-register number of an FPA immediate constant,
5085 or FAIL if there isn't a valid constant here. */
5088 parse_fpa_immediate (char ** str
)
5090 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5096 /* First try and match exact strings, this is to guarantee
5097 that some formats will work even for cross assembly. */
5099 for (i
= 0; fp_const
[i
]; i
++)
5101 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5105 *str
+= strlen (fp_const
[i
]);
5106 if (is_end_of_line
[(unsigned char) **str
])
5112 /* Just because we didn't get a match doesn't mean that the constant
5113 isn't valid, just that it is in a format that we don't
5114 automatically recognize. Try parsing it with the standard
5115 expression routines. */
5117 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5119 /* Look for a raw floating point number. */
5120 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5121 && is_end_of_line
[(unsigned char) *save_in
])
5123 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5125 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5127 if (words
[j
] != fp_values
[i
][j
])
5131 if (j
== MAX_LITTLENUMS
)
5139 /* Try and parse a more complex expression, this will probably fail
5140 unless the code uses a floating point prefix (eg "0f"). */
5141 save_in
= input_line_pointer
;
5142 input_line_pointer
= *str
;
5143 if (expression (&exp
) == absolute_section
5144 && exp
.X_op
== O_big
5145 && exp
.X_add_number
< 0)
5147 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5149 #define X_PRECISION 5
5150 #define E_PRECISION 15L
5151 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5153 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5155 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5157 if (words
[j
] != fp_values
[i
][j
])
5161 if (j
== MAX_LITTLENUMS
)
5163 *str
= input_line_pointer
;
5164 input_line_pointer
= save_in
;
5171 *str
= input_line_pointer
;
5172 input_line_pointer
= save_in
;
5173 inst
.error
= _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the bits covered by the
   0x7ffff mask are clear and the exponent field matches the pattern
   selected by bit 29.  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected;

  /* Bit 29 selects which exponent pattern the B bits must form.  */
  if (imm & 0x20000000)
    expected = 0x3e000000;
  else
    expected = 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5188 /* Detect the presence of a floating point or integer zero constant,
5192 parse_ifimm_zero (char **in
)
5196 if (!is_immediate_prefix (**in
))
5198 /* In unified syntax, all prefixes are optional. */
5199 if (!unified_syntax
)
5205 /* Accept #0x0 as a synonym for #0. */
5206 if (strncmp (*in
, "0x", 2) == 0)
5209 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5214 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5215 &generic_floating_point_number
);
5218 && generic_floating_point_number
.sign
== '+'
5219 && (generic_floating_point_number
.low
5220 > generic_floating_point_number
.leader
))
5226 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5227 0baBbbbbbc defgh000 00000000 00000000.
5228 The zero and minus-zero cases need special handling, since they can't be
5229 encoded in the "quarter-precision" float format, but can nonetheless be
5230 loaded as integer constants. */
5233 parse_qfloat_immediate (char **ccp
, int *immed
)
5237 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5238 int found_fpchar
= 0;
5240 skip_past_char (&str
, '#');
5242 /* We must not accidentally parse an integer as a floating-point number. Make
5243 sure that the value we parse is not an integer by checking for special
5244 characters '.' or 'e'.
5245 FIXME: This is a horrible hack, but doing better is tricky because type
5246 information isn't in a very usable state at parse time. */
5248 skip_whitespace (fpnum
);
5250 if (strncmp (fpnum
, "0x", 2) == 0)
5254 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5255 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5265 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5267 unsigned fpword
= 0;
5270 /* Our FP word must be 32 bits (single-precision FP). */
5271 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5273 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5277 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5290 /* Shift operands. */
5293 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
, SHIFT_UXTW
5296 struct asm_shift_name
5299 enum shift_kind kind
;
5302 /* Third argument to parse_shift. */
5303 enum parse_shift_mode
5305 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5306 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5307 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5308 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5309 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5310 SHIFT_UXTW_IMMEDIATE
/* Shift must be UXTW immediate. */
5313 /* Parse a <shift> specifier on an ARM data processing instruction.
5314 This has three forms:
5316 (LSL|LSR|ASL|ASR|ROR) Rs
5317 (LSL|LSR|ASL|ASR|ROR) #imm
5320 Note that ASL is assimilated to LSL in the instruction encoding, and
5321 RRX to ROR #0 (which cannot be written as such). */
5324 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5326 const struct asm_shift_name
*shift_name
;
5327 enum shift_kind shift
;
5332 for (p
= *str
; ISALPHA (*p
); p
++)
5337 inst
.error
= _("shift expression expected");
5341 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5344 if (shift_name
== NULL
)
5346 inst
.error
= _("shift expression expected");
5350 shift
= shift_name
->kind
;
5354 case NO_SHIFT_RESTRICT
:
5355 case SHIFT_IMMEDIATE
:
5356 if (shift
== SHIFT_UXTW
)
5358 inst
.error
= _("'UXTW' not allowed here");
5363 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5364 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5366 inst
.error
= _("'LSL' or 'ASR' required");
5371 case SHIFT_LSL_IMMEDIATE
:
5372 if (shift
!= SHIFT_LSL
)
5374 inst
.error
= _("'LSL' required");
5379 case SHIFT_ASR_IMMEDIATE
:
5380 if (shift
!= SHIFT_ASR
)
5382 inst
.error
= _("'ASR' required");
5386 case SHIFT_UXTW_IMMEDIATE
:
5387 if (shift
!= SHIFT_UXTW
)
5389 inst
.error
= _("'UXTW' required");
5397 if (shift
!= SHIFT_RRX
)
5399 /* Whitespace can appear here if the next thing is a bare digit. */
5400 skip_whitespace (p
);
5402 if (mode
== NO_SHIFT_RESTRICT
5403 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5405 inst
.operands
[i
].imm
= reg
;
5406 inst
.operands
[i
].immisreg
= 1;
5408 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5411 inst
.operands
[i
].shift_kind
= shift
;
5412 inst
.operands
[i
].shifted
= 1;
5417 /* Parse a <shifter_operand> for an ARM data processing instruction:
5420 #<immediate>, <rotate>
5424 where <shift> is defined by parse_shift above, and <rotate> is a
5425 multiple of 2 between 0 and 30. Validation of immediate operands
5426 is deferred to md_apply_fix. */
5429 parse_shifter_operand (char **str
, int i
)
5434 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5436 inst
.operands
[i
].reg
= value
;
5437 inst
.operands
[i
].isreg
= 1;
5439 /* parse_shift will override this if appropriate */
5440 inst
.relocs
[0].exp
.X_op
= O_constant
;
5441 inst
.relocs
[0].exp
.X_add_number
= 0;
5443 if (skip_past_comma (str
) == FAIL
)
5446 /* Shift operation on register. */
5447 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5450 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5453 if (skip_past_comma (str
) == SUCCESS
)
5455 /* #x, y -- ie explicit rotation by Y. */
5456 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5459 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5461 inst
.error
= _("constant expression expected");
5465 value
= exp
.X_add_number
;
5466 if (value
< 0 || value
> 30 || value
% 2 != 0)
5468 inst
.error
= _("invalid rotation");
5471 if (inst
.relocs
[0].exp
.X_add_number
< 0
5472 || inst
.relocs
[0].exp
.X_add_number
> 255)
5474 inst
.error
= _("invalid constant");
5478 /* Encode as specified. */
5479 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5483 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5484 inst
.relocs
[0].pc_rel
= 0;
5488 /* Group relocation information. Each entry in the table contains the
5489 textual name of the relocation as may appear in assembler source
5490 and must end with a colon.
5491 Along with this textual name are the relocation codes to be used if
5492 the corresponding instruction is an ALU instruction (ADD or SUB only),
5493 an LDR, an LDRS, or an LDC. */
5495 struct group_reloc_table_entry
5506 /* Varieties of non-ALU group relocation. */
5514 static struct group_reloc_table_entry group_reloc_table
[] =
5515 { /* Program counter relative: */
5517 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5522 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5523 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5524 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5525 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5527 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5532 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5533 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5534 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5535 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5537 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5538 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5539 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5540 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5541 /* Section base relative */
5543 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5548 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5549 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5550 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5551 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5553 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5558 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5559 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5560 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5561 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5563 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5564 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5565 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5566 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5567 /* Absolute thumb alu relocations. */
5569 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5574 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5579 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5584 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5589 /* Given the address of a pointer pointing to the textual name of a group
5590 relocation as may appear in assembler source, attempt to find its details
5591 in group_reloc_table. The pointer will be updated to the character after
5592 the trailing colon. On failure, FAIL will be returned; SUCCESS
5593 otherwise. On success, *entry will be updated to point at the relevant
5594 group_reloc_table entry. */
5597 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5600 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5602 int length
= strlen (group_reloc_table
[i
].name
);
5604 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5605 && (*str
)[length
] == ':')
5607 *out
= &group_reloc_table
[i
];
5608 *str
+= (length
+ 1);
5616 /* Parse a <shifter_operand> for an ARM data processing instruction
5617 (as for parse_shifter_operand) where group relocations are allowed:
5620 #<immediate>, <rotate>
5621 #:<group_reloc>:<expression>
5625 where <group_reloc> is one of the strings defined in group_reloc_table.
5626 The hashes are optional.
5628 Everything else is as for parse_shifter_operand. */
5630 static parse_operand_result
5631 parse_shifter_operand_group_reloc (char **str
, int i
)
5633 /* Determine if we have the sequence of characters #: or just :
5634 coming next. If we do, then we check for a group relocation.
5635 If we don't, punt the whole lot to parse_shifter_operand. */
5637 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5638 || (*str
)[0] == ':')
5640 struct group_reloc_table_entry
*entry
;
5642 if ((*str
)[0] == '#')
5647 /* Try to parse a group relocation. Anything else is an error. */
5648 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5650 inst
.error
= _("unknown group relocation");
5651 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5654 /* We now have the group relocation table entry corresponding to
5655 the name in the assembler source. Next, we parse the expression. */
5656 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5657 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5659 /* Record the relocation type (always the ALU variant here). */
5660 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5661 gas_assert (inst
.relocs
[0].type
!= 0);
5663 return PARSE_OPERAND_SUCCESS
;
5666 return parse_shifter_operand (str
, i
) == SUCCESS
5667 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5669 /* Never reached. */
5672 /* Parse a Neon alignment expression. Information is written to
5673 inst.operands[i]. We assume the initial ':' has been skipped.
5675 align .imm = align << 8, .immisalign=1, .preind=0 */
5676 static parse_operand_result
5677 parse_neon_alignment (char **str
, int i
)
5682 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5684 if (exp
.X_op
!= O_constant
)
5686 inst
.error
= _("alignment must be constant");
5687 return PARSE_OPERAND_FAIL
;
5690 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5691 inst
.operands
[i
].immisalign
= 1;
5692 /* Alignments are not pre-indexes. */
5693 inst
.operands
[i
].preind
= 0;
5696 return PARSE_OPERAND_SUCCESS
;
5699 /* Parse all forms of an ARM address expression. Information is written
5700 to inst.operands[i] and/or inst.relocs[0].
5702 Preindexed addressing (.preind=1):
5704 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5705 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5706 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5707 .shift_kind=shift .relocs[0].exp=shift_imm
5709 These three may have a trailing ! which causes .writeback to be set also.
5711 Postindexed addressing (.postind=1, .writeback=1):
5713 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5714 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5715 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5716 .shift_kind=shift .relocs[0].exp=shift_imm
5718 Unindexed addressing (.preind=0, .postind=0):
5720 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5724 [Rn]{!} shorthand for [Rn,#0]{!}
5725 =immediate .isreg=0 .relocs[0].exp=immediate
5726 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5728 It is the caller's responsibility to check for addressing modes not
5729 supported by the instruction, and to set inst.relocs[0].type. */
5731 static parse_operand_result
5732 parse_address_main (char **str
, int i
, int group_relocations
,
5733 group_reloc_type group_type
)
5738 if (skip_past_char (&p
, '[') == FAIL
)
5740 if (skip_past_char (&p
, '=') == FAIL
)
5742 /* Bare address - translate to PC-relative offset. */
5743 inst
.relocs
[0].pc_rel
= 1;
5744 inst
.operands
[i
].reg
= REG_PC
;
5745 inst
.operands
[i
].isreg
= 1;
5746 inst
.operands
[i
].preind
= 1;
5748 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5749 return PARSE_OPERAND_FAIL
;
5751 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5752 /*allow_symbol_p=*/TRUE
))
5753 return PARSE_OPERAND_FAIL
;
5756 return PARSE_OPERAND_SUCCESS
;
5759 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5760 skip_whitespace (p
);
5762 if (group_type
== GROUP_MVE
)
5764 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5765 struct neon_type_el et
;
5766 if ((reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5768 inst
.operands
[i
].isquad
= 1;
5770 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5772 inst
.error
= BAD_ADDR_MODE
;
5773 return PARSE_OPERAND_FAIL
;
5776 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5778 if (group_type
== GROUP_MVE
)
5779 inst
.error
= BAD_ADDR_MODE
;
5781 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5782 return PARSE_OPERAND_FAIL
;
5784 inst
.operands
[i
].reg
= reg
;
5785 inst
.operands
[i
].isreg
= 1;
5787 if (skip_past_comma (&p
) == SUCCESS
)
5789 inst
.operands
[i
].preind
= 1;
5792 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5794 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5795 struct neon_type_el et
;
5796 if (group_type
== GROUP_MVE
5797 && (reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5799 inst
.operands
[i
].immisreg
= 2;
5800 inst
.operands
[i
].imm
= reg
;
5802 if (skip_past_comma (&p
) == SUCCESS
)
5804 if (parse_shift (&p
, i
, SHIFT_UXTW_IMMEDIATE
) == SUCCESS
)
5806 inst
.operands
[i
].imm
|= inst
.relocs
[0].exp
.X_add_number
<< 5;
5807 inst
.relocs
[0].exp
.X_add_number
= 0;
5810 return PARSE_OPERAND_FAIL
;
5813 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5815 inst
.operands
[i
].imm
= reg
;
5816 inst
.operands
[i
].immisreg
= 1;
5818 if (skip_past_comma (&p
) == SUCCESS
)
5819 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5820 return PARSE_OPERAND_FAIL
;
5822 else if (skip_past_char (&p
, ':') == SUCCESS
)
5824 /* FIXME: '@' should be used here, but it's filtered out by generic
5825 code before we get to see it here. This may be subject to
5827 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5829 if (result
!= PARSE_OPERAND_SUCCESS
)
5834 if (inst
.operands
[i
].negative
)
5836 inst
.operands
[i
].negative
= 0;
5840 if (group_relocations
5841 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5843 struct group_reloc_table_entry
*entry
;
5845 /* Skip over the #: or : sequence. */
5851 /* Try to parse a group relocation. Anything else is an
5853 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5855 inst
.error
= _("unknown group relocation");
5856 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5859 /* We now have the group relocation table entry corresponding to
5860 the name in the assembler source. Next, we parse the
5862 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5863 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5865 /* Record the relocation type. */
5870 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5875 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5880 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5887 if (inst
.relocs
[0].type
== 0)
5889 inst
.error
= _("this group relocation is not allowed on this instruction");
5890 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5897 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5898 return PARSE_OPERAND_FAIL
;
5899 /* If the offset is 0, find out if it's a +0 or -0. */
5900 if (inst
.relocs
[0].exp
.X_op
== O_constant
5901 && inst
.relocs
[0].exp
.X_add_number
== 0)
5903 skip_whitespace (q
);
5907 skip_whitespace (q
);
5910 inst
.operands
[i
].negative
= 1;
5915 else if (skip_past_char (&p
, ':') == SUCCESS
)
5917 /* FIXME: '@' should be used here, but it's filtered out by generic code
5918 before we get to see it here. This may be subject to change. */
5919 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5921 if (result
!= PARSE_OPERAND_SUCCESS
)
5925 if (skip_past_char (&p
, ']') == FAIL
)
5927 inst
.error
= _("']' expected");
5928 return PARSE_OPERAND_FAIL
;
5931 if (skip_past_char (&p
, '!') == SUCCESS
)
5932 inst
.operands
[i
].writeback
= 1;
5934 else if (skip_past_comma (&p
) == SUCCESS
)
5936 if (skip_past_char (&p
, '{') == SUCCESS
)
5938 /* [Rn], {expr} - unindexed, with option */
5939 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5940 0, 255, TRUE
) == FAIL
)
5941 return PARSE_OPERAND_FAIL
;
5943 if (skip_past_char (&p
, '}') == FAIL
)
5945 inst
.error
= _("'}' expected at end of 'option' field");
5946 return PARSE_OPERAND_FAIL
;
5948 if (inst
.operands
[i
].preind
)
5950 inst
.error
= _("cannot combine index with option");
5951 return PARSE_OPERAND_FAIL
;
5954 return PARSE_OPERAND_SUCCESS
;
5958 inst
.operands
[i
].postind
= 1;
5959 inst
.operands
[i
].writeback
= 1;
5961 if (inst
.operands
[i
].preind
)
5963 inst
.error
= _("cannot combine pre- and post-indexing");
5964 return PARSE_OPERAND_FAIL
;
5968 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5970 enum arm_reg_type rtype
= REG_TYPE_MQ
;
5971 struct neon_type_el et
;
5972 if (group_type
== GROUP_MVE
5973 && (reg
= arm_typed_reg_parse (&p
, rtype
, &rtype
, &et
)) != FAIL
)
5975 inst
.operands
[i
].immisreg
= 2;
5976 inst
.operands
[i
].imm
= reg
;
5978 else if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5980 /* We might be using the immediate for alignment already. If we
5981 are, OR the register number into the low-order bits. */
5982 if (inst
.operands
[i
].immisalign
)
5983 inst
.operands
[i
].imm
|= reg
;
5985 inst
.operands
[i
].imm
= reg
;
5986 inst
.operands
[i
].immisreg
= 1;
5988 if (skip_past_comma (&p
) == SUCCESS
)
5989 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5990 return PARSE_OPERAND_FAIL
;
5996 if (inst
.operands
[i
].negative
)
5998 inst
.operands
[i
].negative
= 0;
6001 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
6002 return PARSE_OPERAND_FAIL
;
6003 /* If the offset is 0, find out if it's a +0 or -0. */
6004 if (inst
.relocs
[0].exp
.X_op
== O_constant
6005 && inst
.relocs
[0].exp
.X_add_number
== 0)
6007 skip_whitespace (q
);
6011 skip_whitespace (q
);
6014 inst
.operands
[i
].negative
= 1;
6020 /* If at this point neither .preind nor .postind is set, we have a
6021 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
6022 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
6024 inst
.operands
[i
].preind
= 1;
6025 inst
.relocs
[0].exp
.X_op
= O_constant
;
6026 inst
.relocs
[0].exp
.X_add_number
= 0;
6029 return PARSE_OPERAND_SUCCESS
;
6033 parse_address (char **str
, int i
)
6035 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
6039 static parse_operand_result
6040 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
6042 return parse_address_main (str
, i
, 1, type
);
6045 /* Parse an operand for a MOVW or MOVT instruction. */
6047 parse_half (char **str
)
6052 skip_past_char (&p
, '#');
6053 if (strncasecmp (p
, ":lower16:", 9) == 0)
6054 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
6055 else if (strncasecmp (p
, ":upper16:", 9) == 0)
6056 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
6058 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
6061 skip_whitespace (p
);
6064 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
6067 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
6069 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
6071 inst
.error
= _("constant expression expected");
6074 if (inst
.relocs
[0].exp
.X_add_number
< 0
6075 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
6077 inst
.error
= _("immediate value out of range");
6085 /* Miscellaneous. */
6087 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
6088 or a bitmask suitable to be or-ed into the ARM msr instruction. */
6090 parse_psr (char **str
, bfd_boolean lhs
)
6093 unsigned long psr_field
;
6094 const struct asm_psr
*psr
;
6096 bfd_boolean is_apsr
= FALSE
;
6097 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
6099 /* PR gas/12698: If the user has specified -march=all then m_profile will
6100 be TRUE, but we want to ignore it in this case as we are building for any
6101 CPU type, including non-m variants. */
6102 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
6105 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
6106 feature for ease of use and backwards compatibility. */
6108 if (strncasecmp (p
, "SPSR", 4) == 0)
6111 goto unsupported_psr
;
6113 psr_field
= SPSR_BIT
;
6115 else if (strncasecmp (p
, "CPSR", 4) == 0)
6118 goto unsupported_psr
;
6122 else if (strncasecmp (p
, "APSR", 4) == 0)
6124 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
6125 and ARMv7-R architecture CPUs. */
6134 while (ISALNUM (*p
) || *p
== '_');
6136 if (strncasecmp (start
, "iapsr", 5) == 0
6137 || strncasecmp (start
, "eapsr", 5) == 0
6138 || strncasecmp (start
, "xpsr", 4) == 0
6139 || strncasecmp (start
, "psr", 3) == 0)
6140 p
= start
+ strcspn (start
, "rR") + 1;
6142 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
6148 /* If APSR is being written, a bitfield may be specified. Note that
6149 APSR itself is handled above. */
6150 if (psr
->field
<= 3)
6152 psr_field
= psr
->field
;
6158 /* M-profile MSR instructions have the mask field set to "10", except
6159 *PSR variants which modify APSR, which may use a different mask (and
6160 have been handled already). Do that by setting the PSR_f field
6162 return psr
->field
| (lhs
? PSR_f
: 0);
6165 goto unsupported_psr
;
6171 /* A suffix follows. */
6177 while (ISALNUM (*p
) || *p
== '_');
6181 /* APSR uses a notation for bits, rather than fields. */
6182 unsigned int nzcvq_bits
= 0;
6183 unsigned int g_bit
= 0;
6186 for (bit
= start
; bit
!= p
; bit
++)
6188 switch (TOLOWER (*bit
))
6191 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6195 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6199 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6203 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6207 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6211 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6215 inst
.error
= _("unexpected bit specified after APSR");
6220 if (nzcvq_bits
== 0x1f)
6225 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6227 inst
.error
= _("selected processor does not "
6228 "support DSP extension");
6235 if ((nzcvq_bits
& 0x20) != 0
6236 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6237 || (g_bit
& 0x2) != 0)
6239 inst
.error
= _("bad bitmask specified after APSR");
6245 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6250 psr_field
|= psr
->field
;
6256 goto error
; /* Garbage after "[CS]PSR". */
6258 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6259 is deprecated, but allow it anyway. */
6263 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6266 else if (!m_profile
)
6267 /* These bits are never right for M-profile devices: don't set them
6268 (only code paths which read/write APSR reach here). */
6269 psr_field
|= (PSR_c
| PSR_f
);
6275 inst
.error
= _("selected processor does not support requested special "
6276 "purpose register");
6280 inst
.error
= _("flag for {c}psr instruction expected");
6285 parse_sys_vldr_vstr (char **str
)
6294 {"FPSCR", 0x1, 0x0},
6295 {"FPSCR_nzcvqc", 0x2, 0x0},
6298 {"FPCXTNS", 0x6, 0x1},
6299 {"FPCXTS", 0x7, 0x1}
6301 char *op_end
= strchr (*str
, ',');
6302 size_t op_strlen
= op_end
- *str
;
6304 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6306 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6308 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6317 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6318 value suitable for splatting into the AIF field of the instruction. */
6321 parse_cps_flags (char **str
)
6330 case '\0': case ',':
6333 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6334 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6335 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6338 inst
.error
= _("unrecognized CPS flag");
6343 if (saw_a_flag
== 0)
6345 inst
.error
= _("missing CPS flags");
6353 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6354 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6357 parse_endian_specifier (char **str
)
6362 if (strncasecmp (s
, "BE", 2))
6364 else if (strncasecmp (s
, "LE", 2))
6368 inst
.error
= _("valid endian specifiers are be or le");
6372 if (ISALNUM (s
[2]) || s
[2] == '_')
6374 inst
.error
= _("valid endian specifiers are be or le");
6379 return little_endian
;
6382 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6383 value suitable for poking into the rotate field of an sxt or sxta
6384 instruction, or FAIL on error. */
6387 parse_ror (char **str
)
6392 if (strncasecmp (s
, "ROR", 3) == 0)
6396 inst
.error
= _("missing rotation field after comma");
6400 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6405 case 0: *str
= s
; return 0x0;
6406 case 8: *str
= s
; return 0x1;
6407 case 16: *str
= s
; return 0x2;
6408 case 24: *str
= s
; return 0x3;
6411 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6416 /* Parse a conditional code (from conds[] below). The value returned is in the
6417 range 0 .. 14, or FAIL. */
6419 parse_cond (char **str
)
6422 const struct asm_cond
*c
;
6424 /* Condition codes are always 2 characters, so matching up to
6425 3 characters is sufficient. */
6430 while (ISALPHA (*q
) && n
< 3)
6432 cond
[n
] = TOLOWER (*q
);
6437 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6440 inst
.error
= _("condition required");
6448 /* Parse an option for a barrier instruction. Returns the encoding for the
6451 parse_barrier (char **str
)
6454 const struct asm_barrier_opt
*o
;
6457 while (ISALPHA (*q
))
6460 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6465 if (!mark_feature_used (&o
->arch
))
6472 /* Parse the operands of a table branch instruction. Similar to a memory
6475 parse_tb (char **str
)
6480 if (skip_past_char (&p
, '[') == FAIL
)
6482 inst
.error
= _("'[' expected");
6486 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6488 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6491 inst
.operands
[0].reg
= reg
;
6493 if (skip_past_comma (&p
) == FAIL
)
6495 inst
.error
= _("',' expected");
6499 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6501 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6504 inst
.operands
[0].imm
= reg
;
6506 if (skip_past_comma (&p
) == SUCCESS
)
6508 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6510 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6512 inst
.error
= _("invalid shift");
6515 inst
.operands
[0].shifted
= 1;
6518 if (skip_past_char (&p
, ']') == FAIL
)
6520 inst
.error
= _("']' expected");
6527 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6528 information on the types the operands can take and how they are encoded.
6529 Up to four operands may be read; this function handles setting the
6530 ".present" field for each read operand itself.
6531 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6532 else returns FAIL. */
6535 parse_neon_mov (char **str
, int *which_operand
)
6537 int i
= *which_operand
, val
;
6538 enum arm_reg_type rtype
;
6540 struct neon_type_el optype
;
6542 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6544 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6545 inst
.operands
[i
].reg
= val
;
6546 inst
.operands
[i
].isscalar
= 1;
6547 inst
.operands
[i
].vectype
= optype
;
6548 inst
.operands
[i
++].present
= 1;
6550 if (skip_past_comma (&ptr
) == FAIL
)
6553 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6556 inst
.operands
[i
].reg
= val
;
6557 inst
.operands
[i
].isreg
= 1;
6558 inst
.operands
[i
].present
= 1;
6560 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6563 /* Cases 0, 1, 2, 3, 5 (D only). */
6564 if (skip_past_comma (&ptr
) == FAIL
)
6567 inst
.operands
[i
].reg
= val
;
6568 inst
.operands
[i
].isreg
= 1;
6569 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6570 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6571 inst
.operands
[i
].isvec
= 1;
6572 inst
.operands
[i
].vectype
= optype
;
6573 inst
.operands
[i
++].present
= 1;
6575 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6577 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6578 Case 13: VMOV <Sd>, <Rm> */
6579 inst
.operands
[i
].reg
= val
;
6580 inst
.operands
[i
].isreg
= 1;
6581 inst
.operands
[i
].present
= 1;
6583 if (rtype
== REG_TYPE_NQ
)
6585 first_error (_("can't use Neon quad register here"));
6588 else if (rtype
!= REG_TYPE_VFS
)
6591 if (skip_past_comma (&ptr
) == FAIL
)
6593 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6595 inst
.operands
[i
].reg
= val
;
6596 inst
.operands
[i
].isreg
= 1;
6597 inst
.operands
[i
].present
= 1;
6600 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6603 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6604 Case 1: VMOV<c><q> <Dd>, <Dm>
6605 Case 8: VMOV.F32 <Sd>, <Sm>
6606 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6608 inst
.operands
[i
].reg
= val
;
6609 inst
.operands
[i
].isreg
= 1;
6610 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6611 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6612 inst
.operands
[i
].isvec
= 1;
6613 inst
.operands
[i
].vectype
= optype
;
6614 inst
.operands
[i
].present
= 1;
6616 if (skip_past_comma (&ptr
) == SUCCESS
)
6621 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6624 inst
.operands
[i
].reg
= val
;
6625 inst
.operands
[i
].isreg
= 1;
6626 inst
.operands
[i
++].present
= 1;
6628 if (skip_past_comma (&ptr
) == FAIL
)
6631 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6634 inst
.operands
[i
].reg
= val
;
6635 inst
.operands
[i
].isreg
= 1;
6636 inst
.operands
[i
].present
= 1;
6639 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6640 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6641 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6642 Case 10: VMOV.F32 <Sd>, #<imm>
6643 Case 11: VMOV.F64 <Dd>, #<imm> */
6644 inst
.operands
[i
].immisfloat
= 1;
6645 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6647 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6648 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6652 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6656 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6659 inst
.operands
[i
].reg
= val
;
6660 inst
.operands
[i
].isreg
= 1;
6661 inst
.operands
[i
++].present
= 1;
6663 if (skip_past_comma (&ptr
) == FAIL
)
6666 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6668 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6669 inst
.operands
[i
].reg
= val
;
6670 inst
.operands
[i
].isscalar
= 1;
6671 inst
.operands
[i
].present
= 1;
6672 inst
.operands
[i
].vectype
= optype
;
6674 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6676 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6677 inst
.operands
[i
].reg
= val
;
6678 inst
.operands
[i
].isreg
= 1;
6679 inst
.operands
[i
++].present
= 1;
6681 if (skip_past_comma (&ptr
) == FAIL
)
6684 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6687 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6691 inst
.operands
[i
].reg
= val
;
6692 inst
.operands
[i
].isreg
= 1;
6693 inst
.operands
[i
].isvec
= 1;
6694 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6695 inst
.operands
[i
].vectype
= optype
;
6696 inst
.operands
[i
].present
= 1;
6698 if (rtype
== REG_TYPE_VFS
)
6702 if (skip_past_comma (&ptr
) == FAIL
)
6704 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6707 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6710 inst
.operands
[i
].reg
= val
;
6711 inst
.operands
[i
].isreg
= 1;
6712 inst
.operands
[i
].isvec
= 1;
6713 inst
.operands
[i
].issingle
= 1;
6714 inst
.operands
[i
].vectype
= optype
;
6715 inst
.operands
[i
].present
= 1;
6718 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6722 inst
.operands
[i
].reg
= val
;
6723 inst
.operands
[i
].isreg
= 1;
6724 inst
.operands
[i
].isvec
= 1;
6725 inst
.operands
[i
].issingle
= 1;
6726 inst
.operands
[i
].vectype
= optype
;
6727 inst
.operands
[i
].present
= 1;
6732 first_error (_("parse error"));
6736 /* Successfully parsed the operands. Update args. */
6742 first_error (_("expected comma"));
6746 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))

/* Matcher codes for parse_operands.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNDMQ,	/* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,	/* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,	/* Neon single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNDQMQ,	/* Neon double, quad or MVE vector register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   ARM register.  */
  OP_RMQ,	/* MVE vector register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */
  OP_VRSDVLST,	/* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0,	/* VFP S or D reg, or floating point immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC,	/* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,	/*		   0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I255,	/*		   0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff.  */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/*				0 .. 31 */
  OP_oI32b,	/*				1 .. 32 */
  OP_oI32z,	/*				0 .. 32 */
  OP_oIffffb,	/*				0 .. 65535 */
  OP_oI255c,	/*	 curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oLR,	/* ARM LR register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQMQ,	/* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	/* Optional single, double or quad register or MVE vector
		   register.  */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  OP_FIRST_OPTIONAL = OP_oI7b
};
6921 /* Generic instruction operand parser. This does no encoding and no
6922 semantic validation; it merely squirrels values away in the inst
6923 structure. Returns SUCCESS or FAIL depending on whether the
6924 specified grammar matched. */
6926 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6928 unsigned const int *upat
= pattern
;
6929 char *backtrack_pos
= 0;
6930 const char *backtrack_error
= 0;
6931 int i
, val
= 0, backtrack_index
= 0;
6932 enum arm_reg_type rtype
;
6933 parse_operand_result result
;
6934 unsigned int op_parse_code
;
6935 bfd_boolean partial_match
;
6937 #define po_char_or_fail(chr) \
6940 if (skip_past_char (&str, chr) == FAIL) \
6945 #define po_reg_or_fail(regtype) \
6948 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6949 & inst.operands[i].vectype); \
6952 first_error (_(reg_expected_msgs[regtype])); \
6955 inst.operands[i].reg = val; \
6956 inst.operands[i].isreg = 1; \
6957 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6958 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6959 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6960 || rtype == REG_TYPE_VFD \
6961 || rtype == REG_TYPE_NQ); \
6965 #define po_reg_or_goto(regtype, label) \
6968 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6969 & inst.operands[i].vectype); \
6973 inst.operands[i].reg = val; \
6974 inst.operands[i].isreg = 1; \
6975 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6976 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6977 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6978 || rtype == REG_TYPE_VFD \
6979 || rtype == REG_TYPE_NQ); \
6983 #define po_imm_or_fail(min, max, popt) \
6986 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6988 inst.operands[i].imm = val; \
6992 #define po_scalar_or_goto(elsz, label) \
6995 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6998 inst.operands[i].reg = val; \
6999 inst.operands[i].isscalar = 1; \
7003 #define po_misc_or_fail(expr) \
7011 #define po_misc_or_fail_no_backtrack(expr) \
7015 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
7016 backtrack_pos = 0; \
7017 if (result != PARSE_OPERAND_SUCCESS) \
7022 #define po_barrier_or_imm(str) \
7025 val = parse_barrier (&str); \
7026 if (val == FAIL && ! ISALPHA (*str)) \
7029 /* ISB can only take SY as an option. */ \
7030 || ((inst.instruction & 0xf0) == 0x60 \
7033 inst.error = _("invalid barrier type"); \
7034 backtrack_pos = 0; \
7040 skip_whitespace (str
);
7042 for (i
= 0; upat
[i
] != OP_stop
; i
++)
7044 op_parse_code
= upat
[i
];
7045 if (op_parse_code
>= 1<<16)
7046 op_parse_code
= thumb
? (op_parse_code
>> 16)
7047 : (op_parse_code
& ((1<<16)-1));
7049 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
7051 /* Remember where we are in case we need to backtrack. */
7052 backtrack_pos
= str
;
7053 backtrack_error
= inst
.error
;
7054 backtrack_index
= i
;
7057 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
7058 po_char_or_fail (',');
7060 switch (op_parse_code
)
7072 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
7073 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
7074 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
7075 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
7076 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
7077 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
7080 po_reg_or_goto (REG_TYPE_RN
, try_rndmq
);
7084 po_reg_or_goto (REG_TYPE_MQ
, try_rnd
);
7087 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
7089 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
7091 /* Also accept generic coprocessor regs for unknown registers. */
7093 po_reg_or_fail (REG_TYPE_CN
);
7095 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
7096 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
7097 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
7098 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
7099 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
7100 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
7101 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
7102 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
7103 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
7104 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
7107 po_reg_or_goto (REG_TYPE_MQ
, try_nq
);
7110 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
7111 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
7114 po_reg_or_goto (REG_TYPE_MQ
, try_rndq
);
7118 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
7120 po_reg_or_goto (REG_TYPE_MQ
, try_rvsd
);
7123 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
7125 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
7127 po_reg_or_goto (REG_TYPE_RN
, try_mq
);
7132 po_reg_or_goto (REG_TYPE_MQ
, try_nsdq2
);
7135 po_reg_or_fail (REG_TYPE_NSDQ
);
7139 po_reg_or_fail (REG_TYPE_MQ
);
7141 /* Neon scalar. Using an element size of 8 means that some invalid
7142 scalars are accepted here, so deal with those in later code. */
7143 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
7147 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
7150 po_imm_or_fail (0, 0, TRUE
);
7155 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
7160 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
7163 if (parse_ifimm_zero (&str
))
7164 inst
.operands
[i
].imm
= 0;
7168 = _("only floating point zero is allowed as immediate value");
7176 po_scalar_or_goto (8, try_rr
);
7179 po_reg_or_fail (REG_TYPE_RN
);
7183 case OP_RNSDQ_RNSC_MQ
:
7184 po_reg_or_goto (REG_TYPE_MQ
, try_rnsdq_rnsc
);
7189 po_scalar_or_goto (8, try_nsdq
);
7192 po_reg_or_fail (REG_TYPE_NSDQ
);
7198 po_scalar_or_goto (8, try_s_scalar
);
7201 po_scalar_or_goto (4, try_nsd
);
7204 po_reg_or_fail (REG_TYPE_NSD
);
7210 po_scalar_or_goto (8, try_ndq
);
7213 po_reg_or_fail (REG_TYPE_NDQ
);
7219 po_scalar_or_goto (8, try_vfd
);
7222 po_reg_or_fail (REG_TYPE_VFD
);
7227 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7228 not careful then bad things might happen. */
7229 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7234 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7237 /* There's a possibility of getting a 64-bit immediate here, so
7238 we need special handling. */
7239 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7242 inst
.error
= _("immediate value is out of range");
7250 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7253 po_imm_or_fail (0, 63, TRUE
);
7258 po_char_or_fail ('[');
7259 po_reg_or_fail (REG_TYPE_RN
);
7260 po_char_or_fail (']');
7266 po_reg_or_fail (REG_TYPE_RN
);
7267 if (skip_past_char (&str
, '!') == SUCCESS
)
7268 inst
.operands
[i
].writeback
= 1;
7272 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7273 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7274 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7275 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7276 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7277 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7278 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7279 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7280 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7281 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7282 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7283 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7285 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7287 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7288 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7290 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7291 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7292 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7293 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7295 /* Immediate variants */
7297 po_char_or_fail ('{');
7298 po_imm_or_fail (0, 255, TRUE
);
7299 po_char_or_fail ('}');
7303 /* The expression parser chokes on a trailing !, so we have
7304 to find it first and zap it. */
7307 while (*s
&& *s
!= ',')
7312 inst
.operands
[i
].writeback
= 1;
7314 po_imm_or_fail (0, 31, TRUE
);
7322 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7327 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7332 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7334 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7336 val
= parse_reloc (&str
);
7339 inst
.error
= _("unrecognized relocation suffix");
7342 else if (val
!= BFD_RELOC_UNUSED
)
7344 inst
.operands
[i
].imm
= val
;
7345 inst
.operands
[i
].hasreloc
= 1;
7351 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7353 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7355 inst
.operands
[i
].hasreloc
= 1;
7357 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7359 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7360 inst
.operands
[i
].hasreloc
= 0;
7364 /* Operand for MOVW or MOVT. */
7366 po_misc_or_fail (parse_half (&str
));
7369 /* Register or expression. */
7370 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7371 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7373 /* Register or immediate. */
7374 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7375 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7377 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7379 if (!is_immediate_prefix (*str
))
7382 val
= parse_fpa_immediate (&str
);
7385 /* FPA immediates are encoded as registers 8-15.
7386 parse_fpa_immediate has already applied the offset. */
7387 inst
.operands
[i
].reg
= val
;
7388 inst
.operands
[i
].isreg
= 1;
7391 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7392 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7394 /* Two kinds of register. */
7397 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7399 || (rege
->type
!= REG_TYPE_MMXWR
7400 && rege
->type
!= REG_TYPE_MMXWC
7401 && rege
->type
!= REG_TYPE_MMXWCG
))
7403 inst
.error
= _("iWMMXt data or control register expected");
7406 inst
.operands
[i
].reg
= rege
->number
;
7407 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7413 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7415 || (rege
->type
!= REG_TYPE_MMXWC
7416 && rege
->type
!= REG_TYPE_MMXWCG
))
7418 inst
.error
= _("iWMMXt control register expected");
7421 inst
.operands
[i
].reg
= rege
->number
;
7422 inst
.operands
[i
].isreg
= 1;
7427 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7428 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7429 case OP_oROR
: val
= parse_ror (&str
); break;
7430 case OP_COND
: val
= parse_cond (&str
); break;
7431 case OP_oBARRIER_I15
:
7432 po_barrier_or_imm (str
); break;
7434 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7440 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7441 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7443 inst
.error
= _("Banked registers are not available with this "
7449 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7453 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7456 val
= parse_sys_vldr_vstr (&str
);
7460 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7463 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7465 if (strncasecmp (str
, "APSR_", 5) == 0)
7472 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7473 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7474 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7475 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7476 default: found
= 16;
7480 inst
.operands
[i
].isvec
= 1;
7481 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7482 inst
.operands
[i
].reg
= REG_PC
;
7489 po_misc_or_fail (parse_tb (&str
));
7492 /* Register lists. */
7494 val
= parse_reg_list (&str
, REGLIST_RN
);
7497 inst
.operands
[i
].writeback
= 1;
7503 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7507 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7512 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7517 /* Allow Q registers too. */
7518 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7519 REGLIST_NEON_D
, &partial_match
);
7523 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7524 REGLIST_VFP_S
, &partial_match
);
7525 inst
.operands
[i
].issingle
= 1;
7530 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7531 REGLIST_VFP_D_VPR
, &partial_match
);
7532 if (val
== FAIL
&& !partial_match
)
7535 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7536 REGLIST_VFP_S_VPR
, &partial_match
);
7537 inst
.operands
[i
].issingle
= 1;
7542 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7543 REGLIST_NEON_D
, &partial_match
);
7548 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7549 1, &inst
.operands
[i
].vectype
);
7550 if (val
!= (((op_parse_code
== OP_MSTRLST2
) ? 3 : 7) << 5 | 0xe))
7554 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7555 0, &inst
.operands
[i
].vectype
);
7558 /* Addressing modes */
7560 po_misc_or_fail (parse_address_group_reloc (&str
, i
, GROUP_MVE
));
7564 po_misc_or_fail (parse_address (&str
, i
));
7568 po_misc_or_fail_no_backtrack (
7569 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7573 po_misc_or_fail_no_backtrack (
7574 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7578 po_misc_or_fail_no_backtrack (
7579 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7583 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7587 po_misc_or_fail_no_backtrack (
7588 parse_shifter_operand_group_reloc (&str
, i
));
7592 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7596 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7600 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7604 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7607 /* Various value-based sanity checks and shared operations. We
7608 do not signal immediate failures for the register constraints;
7609 this allows a syntax error to take precedence. */
7610 switch (op_parse_code
)
7618 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7619 inst
.error
= BAD_PC
;
7624 if (inst
.operands
[i
].isreg
)
7626 if (inst
.operands
[i
].reg
== REG_PC
)
7627 inst
.error
= BAD_PC
;
7628 else if (inst
.operands
[i
].reg
== REG_SP
7629 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7630 relaxed since ARMv8-A. */
7631 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7634 inst
.error
= BAD_SP
;
7640 if (inst
.operands
[i
].isreg
7641 && inst
.operands
[i
].reg
== REG_PC
7642 && (inst
.operands
[i
].writeback
|| thumb
))
7643 inst
.error
= BAD_PC
;
7647 if (inst
.operands
[i
].isreg
)
7656 case OP_oBARRIER_I15
:
7669 inst
.operands
[i
].imm
= val
;
7674 if (inst
.operands
[i
].reg
!= REG_LR
)
7675 inst
.error
= _("operand must be LR register");
7679 if (inst
.operands
[i
].isreg
7680 && (inst
.operands
[i
].reg
& 0x00000001) != 0)
7681 inst
.error
= BAD_ODD
;
7685 if (inst
.operands
[i
].isreg
)
7687 if ((inst
.operands
[i
].reg
& 0x00000001) != 1)
7688 inst
.error
= BAD_EVEN
;
7689 else if (inst
.operands
[i
].reg
== REG_SP
)
7690 as_tsktsk (MVE_BAD_SP
);
7691 else if (inst
.operands
[i
].reg
== REG_PC
)
7692 inst
.error
= BAD_PC
;
7700 /* If we get here, this operand was successfully parsed. */
7701 inst
.operands
[i
].present
= 1;
7705 inst
.error
= BAD_ARGS
;
7710 /* The parse routine should already have set inst.error, but set a
7711 default here just in case. */
7713 inst
.error
= BAD_SYNTAX
;
7717 /* Do not backtrack over a trailing optional argument that
7718 absorbed some text. We will only fail again, with the
7719 'garbage following instruction' error message, which is
7720 probably less helpful than the current one. */
7721 if (backtrack_index
== i
&& backtrack_pos
!= str
7722 && upat
[i
+1] == OP_stop
)
7725 inst
.error
= BAD_SYNTAX
;
7729 /* Try again, skipping the optional argument at backtrack_pos. */
7730 str
= backtrack_pos
;
7731 inst
.error
= backtrack_error
;
7732 inst
.operands
[backtrack_index
].present
= 0;
7733 i
= backtrack_index
;
7737 /* Check that we have parsed all the arguments. */
7738 if (*str
!= '\0' && !inst
.error
)
7739 inst
.error
= _("garbage following instruction");
7741 return inst
.error
? FAIL
: SUCCESS
;
7744 #undef po_char_or_fail
7745 #undef po_reg_or_fail
7746 #undef po_reg_or_goto
7747 #undef po_imm_or_fail
7748 #undef po_scalar_or_fail
7749 #undef po_barrier_or_imm
7751 /* Shorthand macro for instruction encoding functions issuing errors. */
7752 #define constraint(expr, err) \
7763 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7764 instructions are unpredictable if these registers are used. This
7765 is the BadReg predicate in ARM's Thumb-2 documentation.
7767 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7768 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7769 #define reject_bad_reg(reg) \
7771 if (reg == REG_PC) \
7773 inst.error = BAD_PC; \
7776 else if (reg == REG_SP \
7777 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7779 inst.error = BAD_SP; \
7784 /* If REG is R13 (the stack pointer), warn that its use is
7786 #define warn_deprecated_sp(reg) \
7788 if (warn_on_deprecated && reg == REG_SP) \
7789 as_tsktsk (_("use of r13 is deprecated")); \
7792 /* Functions for operand encoding. ARM, then Thumb. */
7794 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7796 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7798 The only binary encoding difference is the Coprocessor number. Coprocessor
7799 9 is used for half-precision calculations or conversions. The format of the
7800 instruction is the same as the equivalent Coprocessor 10 instruction that
7801 exists for Single-Precision operation. */
7804 do_scalar_fp16_v82_encode (void)
7806 if (inst
.cond
< COND_ALWAYS
)
7807 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7808 " the behaviour is UNPREDICTABLE"));
7809 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7812 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7813 mark_feature_used (&arm_ext_fp16
);
7816 /* If VAL can be encoded in the immediate field of an ARM instruction,
7817 return the encoded form. Otherwise, return FAIL. */
7820 encode_arm_immediate (unsigned int val
)
7827 for (i
= 2; i
< 32; i
+= 2)
7828 if ((a
= rotate_left (val
, i
)) <= 0xff)
7829 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7834 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7835 return the encoded form. Otherwise, return FAIL. */
7837 encode_thumb32_immediate (unsigned int val
)
7844 for (i
= 1; i
<= 24; i
++)
7847 if ((val
& ~(0xff << i
)) == 0)
7848 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7852 if (val
== ((a
<< 16) | a
))
7854 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7858 if (val
== ((a
<< 16) | a
))
7859 return 0x200 | (a
>> 8);
7863 /* Encode a VFP SP or DP register number into inst.instruction. */
7866 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7868 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7871 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7874 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7877 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7882 first_error (_("D register out of range for selected VFP version"));
7890 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7894 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7898 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7902 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7906 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7910 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7918 /* Encode a <shift> in an ARM-format instruction. The immediate,
7919 if any, is handled by md_apply_fix. */
7921 encode_arm_shift (int i
)
7923 /* register-shifted register. */
7924 if (inst
.operands
[i
].immisreg
)
7927 for (op_index
= 0; op_index
<= i
; ++op_index
)
7929 /* Check the operand only when it's presented. In pre-UAL syntax,
7930 if the destination register is the same as the first operand, two
7931 register form of the instruction can be used. */
7932 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7933 && inst
.operands
[op_index
].reg
== REG_PC
)
7934 as_warn (UNPRED_REG ("r15"));
7937 if (inst
.operands
[i
].imm
== REG_PC
)
7938 as_warn (UNPRED_REG ("r15"));
7941 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7942 inst
.instruction
|= SHIFT_ROR
<< 5;
7945 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7946 if (inst
.operands
[i
].immisreg
)
7948 inst
.instruction
|= SHIFT_BY_REG
;
7949 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7952 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7957 encode_arm_shifter_operand (int i
)
7959 if (inst
.operands
[i
].isreg
)
7961 inst
.instruction
|= inst
.operands
[i
].reg
;
7962 encode_arm_shift (i
);
7966 inst
.instruction
|= INST_IMMEDIATE
;
7967 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7968 inst
.instruction
|= inst
.operands
[i
].imm
;
7972 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7974 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7977 Generate an error if the operand is not a register. */
7978 constraint (!inst
.operands
[i
].isreg
,
7979 _("Instruction does not support =N addresses"));
7981 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7983 if (inst
.operands
[i
].preind
)
7987 inst
.error
= _("instruction does not accept preindexed addressing");
7990 inst
.instruction
|= PRE_INDEX
;
7991 if (inst
.operands
[i
].writeback
)
7992 inst
.instruction
|= WRITE_BACK
;
7995 else if (inst
.operands
[i
].postind
)
7997 gas_assert (inst
.operands
[i
].writeback
);
7999 inst
.instruction
|= WRITE_BACK
;
8001 else /* unindexed - only for coprocessor */
8003 inst
.error
= _("instruction does not accept unindexed addressing");
8007 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
8008 && (((inst
.instruction
& 0x000f0000) >> 16)
8009 == ((inst
.instruction
& 0x0000f000) >> 12)))
8010 as_warn ((inst
.instruction
& LOAD_BIT
)
8011 ? _("destination register same as write-back base")
8012 : _("source register same as write-back base"));
8015 /* inst.operands[i] was set up by parse_address. Encode it into an
8016 ARM-format mode 2 load or store instruction. If is_t is true,
8017 reject forms that cannot be used with a T instruction (i.e. not
8020 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
8022 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
8024 encode_arm_addr_mode_common (i
, is_t
);
8026 if (inst
.operands
[i
].immisreg
)
8028 constraint ((inst
.operands
[i
].imm
== REG_PC
8029 || (is_pc
&& inst
.operands
[i
].writeback
)),
8031 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
8032 inst
.instruction
|= inst
.operands
[i
].imm
;
8033 if (!inst
.operands
[i
].negative
)
8034 inst
.instruction
|= INDEX_UP
;
8035 if (inst
.operands
[i
].shifted
)
8037 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
8038 inst
.instruction
|= SHIFT_ROR
<< 5;
8041 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
8042 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
8046 else /* immediate offset in inst.relocs[0] */
8048 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
8050 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
8052 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
8053 cannot use PC in addressing.
8054 PC cannot be used in writeback addressing, either. */
8055 constraint ((is_t
|| inst
.operands
[i
].writeback
),
8058 /* Use of PC in str is deprecated for ARMv7. */
8059 if (warn_on_deprecated
8061 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
8062 as_tsktsk (_("use of PC in this instruction is deprecated"));
8065 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8067 /* Prefer + for zero encoded value. */
8068 if (!inst
.operands
[i
].negative
)
8069 inst
.instruction
|= INDEX_UP
;
8070 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
8075 /* inst.operands[i] was set up by parse_address. Encode it into an
8076 ARM-format mode 3 load or store instruction. Reject forms that
8077 cannot be used with such instructions. If is_t is true, reject
8078 forms that cannot be used with a T instruction (i.e. not
8081 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
8083 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
8085 inst
.error
= _("instruction does not accept scaled register index");
8089 encode_arm_addr_mode_common (i
, is_t
);
8091 if (inst
.operands
[i
].immisreg
)
8093 constraint ((inst
.operands
[i
].imm
== REG_PC
8094 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
8096 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
8098 inst
.instruction
|= inst
.operands
[i
].imm
;
8099 if (!inst
.operands
[i
].negative
)
8100 inst
.instruction
|= INDEX_UP
;
8102 else /* immediate offset in inst.relocs[0] */
8104 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
8105 && inst
.operands
[i
].writeback
),
8107 inst
.instruction
|= HWOFFSET_IMM
;
8108 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
8110 /* Prefer + for zero encoded value. */
8111 if (!inst
.operands
[i
].negative
)
8112 inst
.instruction
|= INDEX_UP
;
8114 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
8119 /* Write immediate bits [7:0] to the following locations:
8121 |28/24|23 19|18 16|15 4|3 0|
8122 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8124 This function is used by VMOV/VMVN/VORR/VBIC. */
8127 neon_write_immbits (unsigned immbits
)
8129 inst
.instruction
|= immbits
& 0xf;
8130 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
8131 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is treated as zero and not written back.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      break;
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D (i.e. each byte of IMM is either all-zeros or all-ones).  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD: the low bit of each byte,
   packed into a nibble (byte 3 -> bit 3, ... byte 0 -> bit 0).  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh: take the
   sign bit and the top of the exponent/mantissa from an IEEE single.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
8200 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
8201 the instruction. *OP is passed as the initial value of the op field, and
8202 may be set to a different value depending on the constant (i.e.
8203 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
8204 MVN). If the immediate looks like a repeated pattern then also
8205 try smaller element sizes. */
8208 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
8209 unsigned *immbits
, int *op
, int size
,
8210 enum neon_el_type type
)
8212 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
8214 if (type
== NT_float
&& !float_p
)
8217 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
8219 if (size
!= 32 || *op
== 1)
8221 *immbits
= neon_qfloat_bits (immlo
);
8227 if (neon_bits_same_in_bytes (immhi
)
8228 && neon_bits_same_in_bytes (immlo
))
8232 *immbits
= (neon_squash_bits (immhi
) << 4)
8233 | neon_squash_bits (immlo
);
8244 if (immlo
== (immlo
& 0x000000ff))
8249 else if (immlo
== (immlo
& 0x0000ff00))
8251 *immbits
= immlo
>> 8;
8254 else if (immlo
== (immlo
& 0x00ff0000))
8256 *immbits
= immlo
>> 16;
8259 else if (immlo
== (immlo
& 0xff000000))
8261 *immbits
= immlo
>> 24;
8264 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8266 *immbits
= (immlo
>> 8) & 0xff;
8269 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8271 *immbits
= (immlo
>> 16) & 0xff;
8275 if ((immlo
& 0xffff) != (immlo
>> 16))
8282 if (immlo
== (immlo
& 0x000000ff))
8287 else if (immlo
== (immlo
& 0x0000ff00))
8289 *immbits
= immlo
>> 8;
8293 if ((immlo
& 0xff) != (immlo
>> 8))
8298 if (immlo
== (immlo
& 0x000000ff))
8300 /* Don't allow MVN with 8-bit immediate. */
8310 #if defined BFD_HOST_64_BIT
8311 /* Returns TRUE if double precision value V may be cast
8312 to single precision without loss of accuracy. */
8315 is_double_a_single (bfd_int64_t v
)
8317 int exp
= (int)((v
>> 52) & 0x7FF);
8318 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8320 return (exp
== 0 || exp
== 0x7FF
8321 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8322 && (mantissa
& 0x1FFFFFFFl
) == 0;
8325 /* Returns a double precision value casted to single precision
8326 (ignoring the least significant bits in exponent and mantissa). */
8329 double_to_single (bfd_int64_t v
)
8331 int sign
= (int) ((v
>> 63) & 1l);
8332 int exp
= (int) ((v
>> 52) & 0x7FF);
8333 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8339 exp
= exp
- 1023 + 127;
8348 /* No denormalized numbers. */
8354 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8356 #endif /* BFD_HOST_64_BIT */
8365 static void do_vfp_nsyn_opcode (const char *);
8367 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8368 Determine whether it can be performed with a move instruction; if
8369 it can, convert inst.instruction to that move instruction and
8370 return TRUE; if it can't, convert inst.instruction to a literal-pool
8371 load and return FALSE. If this is not a valid thing to do in the
8372 current context, set inst.error and return TRUE.
8374 inst.operands[i] describes the destination register. */
8377 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8380 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8381 bfd_boolean arm_p
= (t
== CONST_ARM
);
8384 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8388 if ((inst
.instruction
& tbit
) == 0)
8390 inst
.error
= _("invalid pseudo operation");
8394 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8395 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8396 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8398 inst
.error
= _("constant expression expected");
8402 if (inst
.relocs
[0].exp
.X_op
== O_constant
8403 || inst
.relocs
[0].exp
.X_op
== O_big
)
8405 #if defined BFD_HOST_64_BIT
8410 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8412 LITTLENUM_TYPE w
[X_PRECISION
];
8415 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8417 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8419 /* FIXME: Should we check words w[2..5] ? */
8424 #if defined BFD_HOST_64_BIT
8426 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8427 << LITTLENUM_NUMBER_OF_BITS
)
8428 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8429 << LITTLENUM_NUMBER_OF_BITS
)
8430 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8431 << LITTLENUM_NUMBER_OF_BITS
)
8432 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8434 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8435 | (l
[0] & LITTLENUM_MASK
);
8439 v
= inst
.relocs
[0].exp
.X_add_number
;
8441 if (!inst
.operands
[i
].issingle
)
8445 /* LDR should not use lead in a flag-setting instruction being
8446 chosen so we do not check whether movs can be used. */
8448 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8449 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8450 && inst
.operands
[i
].reg
!= 13
8451 && inst
.operands
[i
].reg
!= 15)
8453 /* Check if on thumb2 it can be done with a mov.w, mvn or
8454 movw instruction. */
8455 unsigned int newimm
;
8456 bfd_boolean isNegated
;
8458 newimm
= encode_thumb32_immediate (v
);
8459 if (newimm
!= (unsigned int) FAIL
)
8463 newimm
= encode_thumb32_immediate (~v
);
8464 if (newimm
!= (unsigned int) FAIL
)
8468 /* The number can be loaded with a mov.w or mvn
8470 if (newimm
!= (unsigned int) FAIL
8471 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8473 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8474 | (inst
.operands
[i
].reg
<< 8));
8475 /* Change to MOVN. */
8476 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8477 inst
.instruction
|= (newimm
& 0x800) << 15;
8478 inst
.instruction
|= (newimm
& 0x700) << 4;
8479 inst
.instruction
|= (newimm
& 0x0ff);
8482 /* The number can be loaded with a movw instruction. */
8483 else if ((v
& ~0xFFFF) == 0
8484 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8486 int imm
= v
& 0xFFFF;
8488 inst
.instruction
= 0xf2400000; /* MOVW. */
8489 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8490 inst
.instruction
|= (imm
& 0xf000) << 4;
8491 inst
.instruction
|= (imm
& 0x0800) << 15;
8492 inst
.instruction
|= (imm
& 0x0700) << 4;
8493 inst
.instruction
|= (imm
& 0x00ff);
8500 int value
= encode_arm_immediate (v
);
8504 /* This can be done with a mov instruction. */
8505 inst
.instruction
&= LITERAL_MASK
;
8506 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8507 inst
.instruction
|= value
& 0xfff;
8511 value
= encode_arm_immediate (~ v
);
8514 /* This can be done with a mvn instruction. */
8515 inst
.instruction
&= LITERAL_MASK
;
8516 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8517 inst
.instruction
|= value
& 0xfff;
8521 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8524 unsigned immbits
= 0;
8525 unsigned immlo
= inst
.operands
[1].imm
;
8526 unsigned immhi
= inst
.operands
[1].regisimm
8527 ? inst
.operands
[1].reg
8528 : inst
.relocs
[0].exp
.X_unsigned
8530 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8531 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8532 &op
, 64, NT_invtype
);
8536 neon_invert_size (&immlo
, &immhi
, 64);
8538 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8539 &op
, 64, NT_invtype
);
8544 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8550 /* Fill other bits in vmov encoding for both thumb and arm. */
8552 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8554 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8555 neon_write_immbits (immbits
);
8563 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8564 if (inst
.operands
[i
].issingle
8565 && is_quarter_float (inst
.operands
[1].imm
)
8566 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8568 inst
.operands
[1].imm
=
8569 neon_qfloat_bits (v
);
8570 do_vfp_nsyn_opcode ("fconsts");
8574 /* If our host does not support a 64-bit type then we cannot perform
8575 the following optimization. This mean that there will be a
8576 discrepancy between the output produced by an assembler built for
8577 a 32-bit-only host and the output produced from a 64-bit host, but
8578 this cannot be helped. */
8579 #if defined BFD_HOST_64_BIT
8580 else if (!inst
.operands
[1].issingle
8581 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8583 if (is_double_a_single (v
)
8584 && is_quarter_float (double_to_single (v
)))
8586 inst
.operands
[1].imm
=
8587 neon_qfloat_bits (double_to_single (v
));
8588 do_vfp_nsyn_opcode ("fconstd");
8596 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8597 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8600 inst
.operands
[1].reg
= REG_PC
;
8601 inst
.operands
[1].isreg
= 1;
8602 inst
.operands
[1].preind
= 1;
8603 inst
.relocs
[0].pc_rel
= 1;
8604 inst
.relocs
[0].type
= (thumb_p
8605 ? BFD_RELOC_ARM_THUMB_OFFSET
8607 ? BFD_RELOC_ARM_HWLITERAL
8608 : BFD_RELOC_ARM_LITERAL
));
8612 /* inst.operands[i] was set up by parse_address. Encode it into an
8613 ARM-format instruction. Reject all forms which cannot be encoded
8614 into a coprocessor load/store instruction. If wb_ok is false,
8615 reject use of writeback; if unind_ok is false, reject use of
8616 unindexed addressing. If reloc_override is not 0, use it instead
8617 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8618 (in which case it is preserved). */
8621 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8623 if (!inst
.operands
[i
].isreg
)
8626 if (! inst
.operands
[0].isvec
)
8628 inst
.error
= _("invalid co-processor operand");
8631 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8635 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8637 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8639 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8641 gas_assert (!inst
.operands
[i
].writeback
);
8644 inst
.error
= _("instruction does not support unindexed addressing");
8647 inst
.instruction
|= inst
.operands
[i
].imm
;
8648 inst
.instruction
|= INDEX_UP
;
8652 if (inst
.operands
[i
].preind
)
8653 inst
.instruction
|= PRE_INDEX
;
8655 if (inst
.operands
[i
].writeback
)
8657 if (inst
.operands
[i
].reg
== REG_PC
)
8659 inst
.error
= _("pc may not be used with write-back");
8664 inst
.error
= _("instruction does not support writeback");
8667 inst
.instruction
|= WRITE_BACK
;
8671 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8672 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8673 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8674 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8677 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8679 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8682 /* Prefer + for zero encoded value. */
8683 if (!inst
.operands
[i
].negative
)
8684 inst
.instruction
|= INDEX_UP
;
8689 /* Functions for instruction encoding, sorted by sub-architecture.
8690 First some generics; their names are taken from the conventional
8691 bit positions for register arguments in ARM format instructions. */
8701 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8707 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8713 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8714 inst
.instruction
|= inst
.operands
[1].reg
;
8720 inst
.instruction
|= inst
.operands
[0].reg
;
8721 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8727 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8728 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8734 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8735 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8741 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8742 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8746 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8748 if (ARM_CPU_IS_ANY (cpu_variant
))
8750 as_tsktsk ("%s", msg
);
8753 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8765 unsigned Rn
= inst
.operands
[2].reg
;
8766 /* Enforce restrictions on SWP instruction. */
8767 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8769 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8770 _("Rn must not overlap other operands"));
8772 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8774 if (!check_obsolete (&arm_ext_v8
,
8775 _("swp{b} use is obsoleted for ARMv8 and later"))
8776 && warn_on_deprecated
8777 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8778 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8781 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8782 inst
.instruction
|= inst
.operands
[1].reg
;
8783 inst
.instruction
|= Rn
<< 16;
8789 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8790 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8791 inst
.instruction
|= inst
.operands
[2].reg
;
8797 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8798 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8799 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8800 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8802 inst
.instruction
|= inst
.operands
[0].reg
;
8803 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8804 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8810 inst
.instruction
|= inst
.operands
[0].imm
;
8816 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8817 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8820 /* ARM instructions, in alphabetical order by function name (except
8821 that wrapper functions appear immediately after the function they
8824 /* This is a pseudo-op of the form "adr rd, label" to be converted
8825 into a relative address of the form "add rd, pc, #label-.-8". */
8830 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8832 /* Frag hacking will turn this into a sub instruction if the offset turns
8833 out to be negative. */
8834 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8835 inst
.relocs
[0].pc_rel
= 1;
8836 inst
.relocs
[0].exp
.X_add_number
-= 8;
8838 if (support_interwork
8839 && inst
.relocs
[0].exp
.X_op
== O_symbol
8840 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8841 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8842 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8843 inst
.relocs
[0].exp
.X_add_number
|= 1;
8846 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8847 into a relative address of the form:
8848 add rd, pc, #low(label-.-8)"
8849 add rd, rd, #high(label-.-8)" */
8854 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8856 /* Frag hacking will turn this into a sub instruction if the offset turns
8857 out to be negative. */
8858 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8859 inst
.relocs
[0].pc_rel
= 1;
8860 inst
.size
= INSN_SIZE
* 2;
8861 inst
.relocs
[0].exp
.X_add_number
-= 8;
8863 if (support_interwork
8864 && inst
.relocs
[0].exp
.X_op
== O_symbol
8865 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8866 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8867 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8868 inst
.relocs
[0].exp
.X_add_number
|= 1;
8874 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8875 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8877 if (!inst
.operands
[1].present
)
8878 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8879 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8880 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8881 encode_arm_shifter_operand (2);
8887 if (inst
.operands
[0].present
)
8888 inst
.instruction
|= inst
.operands
[0].imm
;
8890 inst
.instruction
|= 0xf;
8896 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8897 constraint (msb
> 32, _("bit-field extends past end of register"));
8898 /* The instruction encoding stores the LSB and MSB,
8899 not the LSB and width. */
8900 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8901 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8902 inst
.instruction
|= (msb
- 1) << 16;
8910 /* #0 in second position is alternative syntax for bfc, which is
8911 the same instruction but with REG_PC in the Rm field. */
8912 if (!inst
.operands
[1].isreg
)
8913 inst
.operands
[1].reg
= REG_PC
;
8915 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8916 constraint (msb
> 32, _("bit-field extends past end of register"));
8917 /* The instruction encoding stores the LSB and MSB,
8918 not the LSB and width. */
8919 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8920 inst
.instruction
|= inst
.operands
[1].reg
;
8921 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8922 inst
.instruction
|= (msb
- 1) << 16;
8928 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8929 _("bit-field extends past end of register"));
8930 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8931 inst
.instruction
|= inst
.operands
[1].reg
;
8932 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8933 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8936 /* ARM V5 breakpoint instruction (argument parse)
8937 BKPT <16 bit unsigned immediate>
8938 Instruction is not conditional.
8939 The bit pattern given in insns[] has the COND_ALWAYS condition,
8940 and it is an error if the caller tried to override that. */
8945 /* Top 12 of 16 bits to bits 19:8. */
8946 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8948 /* Bottom 4 of 16 bits to bits 3:0. */
8949 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8953 encode_branch (int default_reloc
)
8955 if (inst
.operands
[0].hasreloc
)
8957 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8958 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8959 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8960 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8961 ? BFD_RELOC_ARM_PLT32
8962 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8965 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8966 inst
.relocs
[0].pc_rel
= 1;
8973 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8974 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8977 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8984 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8986 if (inst
.cond
== COND_ALWAYS
)
8987 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8989 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8993 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8996 /* ARM V5 branch-link-exchange instruction (argument parse)
8997 BLX <target_addr> ie BLX(1)
8998 BLX{<condition>} <Rm> ie BLX(2)
8999 Unfortunately, there are two different opcodes for this mnemonic.
9000 So, the insns[].value is not used, and the code here zaps values
9001 into inst.instruction.
9002 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
9007 if (inst
.operands
[0].isreg
)
9009 /* Arg is a register; the opcode provided by insns[] is correct.
9010 It is not illegal to do "blx pc", just useless. */
9011 if (inst
.operands
[0].reg
== REG_PC
)
9012 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
9014 inst
.instruction
|= inst
.operands
[0].reg
;
9018 /* Arg is an address; this instruction cannot be executed
9019 conditionally, and the opcode must be adjusted.
9020 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
9021 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
9022 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
9023 inst
.instruction
= 0xfa000000;
9024 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
9031 bfd_boolean want_reloc
;
9033 if (inst
.operands
[0].reg
== REG_PC
)
9034 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
9036 inst
.instruction
|= inst
.operands
[0].reg
;
9037 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
9038 it is for ARMv4t or earlier. */
9039 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
9040 if (!ARM_FEATURE_ZERO (selected_object_arch
)
9041 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
9045 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
9050 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
9054 /* ARM v5TEJ. Jump to Jazelle code. */
9059 if (inst
.operands
[0].reg
== REG_PC
)
9060 as_tsktsk (_("use of r15 in bxj is not really useful"));
9062 inst
.instruction
|= inst
.operands
[0].reg
;
9065 /* Co-processor data operation:
9066 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9067 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
9071 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9072 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
9073 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
9074 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9075 inst
.instruction
|= inst
.operands
[4].reg
;
9076 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9082 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9083 encode_arm_shifter_operand (1);
9086 /* Transfer between coprocessor and ARM registers.
9087 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9092 No special properties. */
9094 struct deprecated_coproc_regs_s
9101 arm_feature_set deprecated
;
9102 arm_feature_set obsoleted
;
9103 const char *dep_msg
;
9104 const char *obs_msg
;
9107 #define DEPR_ACCESS_V8 \
9108 N_("This coprocessor register access is deprecated in ARMv8")
9110 /* Table of all deprecated coprocessor registers. */
9111 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
9113 {15, 0, 7, 10, 5, /* CP15DMB. */
9114 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9115 DEPR_ACCESS_V8
, NULL
},
9116 {15, 0, 7, 10, 4, /* CP15DSB. */
9117 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9118 DEPR_ACCESS_V8
, NULL
},
9119 {15, 0, 7, 5, 4, /* CP15ISB. */
9120 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9121 DEPR_ACCESS_V8
, NULL
},
9122 {14, 6, 1, 0, 0, /* TEEHBR. */
9123 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9124 DEPR_ACCESS_V8
, NULL
},
9125 {14, 6, 0, 0, 0, /* TEECR. */
9126 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
9127 DEPR_ACCESS_V8
, NULL
},
9130 #undef DEPR_ACCESS_V8
9132 static const size_t deprecated_coproc_reg_count
=
9133 sizeof (deprecated_coproc_regs
) / sizeof (deprecated_coproc_regs
[0]);
9141 Rd
= inst
.operands
[2].reg
;
9144 if (inst
.instruction
== 0xee000010
9145 || inst
.instruction
== 0xfe000010)
9147 reject_bad_reg (Rd
);
9148 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9150 constraint (Rd
== REG_SP
, BAD_SP
);
9155 if (inst
.instruction
== 0xe000010)
9156 constraint (Rd
== REG_PC
, BAD_PC
);
9159 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
9161 const struct deprecated_coproc_regs_s
*r
=
9162 deprecated_coproc_regs
+ i
;
9164 if (inst
.operands
[0].reg
== r
->cp
9165 && inst
.operands
[1].imm
== r
->opc1
9166 && inst
.operands
[3].reg
== r
->crn
9167 && inst
.operands
[4].reg
== r
->crm
9168 && inst
.operands
[5].imm
== r
->opc2
)
9170 if (! ARM_CPU_IS_ANY (cpu_variant
)
9171 && warn_on_deprecated
9172 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
9173 as_tsktsk ("%s", r
->dep_msg
);
9177 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9178 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
9179 inst
.instruction
|= Rd
<< 12;
9180 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9181 inst
.instruction
|= inst
.operands
[4].reg
;
9182 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
9185 /* Transfer between coprocessor register and pair of ARM registers.
9186 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9191 Two XScale instructions are special cases of these:
9193 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9194 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9196 Result unpredictable if Rd or Rn is R15. */
9203 Rd
= inst
.operands
[2].reg
;
9204 Rn
= inst
.operands
[3].reg
;
9208 reject_bad_reg (Rd
);
9209 reject_bad_reg (Rn
);
9213 constraint (Rd
== REG_PC
, BAD_PC
);
9214 constraint (Rn
== REG_PC
, BAD_PC
);
9217 /* Only check the MRRC{2} variants. */
9218 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9220 /* If Rd == Rn, error that the operation is
9221 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9222 constraint (Rd
== Rn
, BAD_OVERLAP
);
9225 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9226 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9227 inst
.instruction
|= Rd
<< 12;
9228 inst
.instruction
|= Rn
<< 16;
9229 inst
.instruction
|= inst
.operands
[4].reg
;
9235 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9236 if (inst
.operands
[1].present
)
9238 inst
.instruction
|= CPSI_MMOD
;
9239 inst
.instruction
|= inst
.operands
[1].imm
;
9246 inst
.instruction
|= inst
.operands
[0].imm
;
9252 unsigned Rd
, Rn
, Rm
;
9254 Rd
= inst
.operands
[0].reg
;
9255 Rn
= (inst
.operands
[1].present
9256 ? inst
.operands
[1].reg
: Rd
);
9257 Rm
= inst
.operands
[2].reg
;
9259 constraint ((Rd
== REG_PC
), BAD_PC
);
9260 constraint ((Rn
== REG_PC
), BAD_PC
);
9261 constraint ((Rm
== REG_PC
), BAD_PC
);
9263 inst
.instruction
|= Rd
<< 16;
9264 inst
.instruction
|= Rn
<< 0;
9265 inst
.instruction
|= Rm
<< 8;
9271 /* There is no IT instruction in ARM mode. We
9272 process it to do the validation as if in
9273 thumb mode, just in case the code gets
9274 assembled for thumb using the unified syntax. */
9279 set_pred_insn_type (IT_INSN
);
9280 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
9281 now_pred
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */

static int
only_one_reg_in_list (int range)
{
  /* ffs() is 1-based, so I is the index of the lowest set bit, or -1
     when RANGE is empty.  Check I before shifting: with the original
     single-expression form an empty RANGE evaluated "1 << -1", which
     is undefined behavior in C (negative shift count).  */
  int i = ffs (range) - 1;

  if (i < 0 || i > 15)
    return -1;

  /* Exactly one register iff RANGE is precisely that single bit.  */
  return range == (1 << i) ? i : -1;
}
9295 encode_ldmstm(int from_push_pop_mnem
)
9297 int base_reg
= inst
.operands
[0].reg
;
9298 int range
= inst
.operands
[1].imm
;
9301 inst
.instruction
|= base_reg
<< 16;
9302 inst
.instruction
|= range
;
9304 if (inst
.operands
[1].writeback
)
9305 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9307 if (inst
.operands
[0].writeback
)
9309 inst
.instruction
|= WRITE_BACK
;
9310 /* Check for unpredictable uses of writeback. */
9311 if (inst
.instruction
& LOAD_BIT
)
9313 /* Not allowed in LDM type 2. */
9314 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9315 && ((range
& (1 << REG_PC
)) == 0))
9316 as_warn (_("writeback of base register is UNPREDICTABLE"));
9317 /* Only allowed if base reg not in list for other types. */
9318 else if (range
& (1 << base_reg
))
9319 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9323 /* Not allowed for type 2. */
9324 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9325 as_warn (_("writeback of base register is UNPREDICTABLE"));
9326 /* Only allowed if base reg not in list, or first in list. */
9327 else if ((range
& (1 << base_reg
))
9328 && (range
& ((1 << base_reg
) - 1)))
9329 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9333 /* If PUSH/POP has only one register, then use the A2 encoding. */
9334 one_reg
= only_one_reg_in_list (range
);
9335 if (from_push_pop_mnem
&& one_reg
>= 0)
9337 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9339 if (is_push
&& one_reg
== 13 /* SP */)
9340 /* PR 22483: The A2 encoding cannot be used when
9341 pushing the stack pointer as this is UNPREDICTABLE. */
9344 inst
.instruction
&= A_COND_MASK
;
9345 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9346 inst
.instruction
|= one_reg
<< 12;
9353 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9356 /* ARMv5TE load-consecutive (argument parse)
9365 constraint (inst
.operands
[0].reg
% 2 != 0,
9366 _("first transfer register must be even"));
9367 constraint (inst
.operands
[1].present
9368 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9369 _("can only transfer two consecutive registers"));
9370 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9371 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9373 if (!inst
.operands
[1].present
)
9374 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9376 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9377 register and the first register written; we have to diagnose
9378 overlap between the base and the second register written here. */
9380 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9381 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9382 as_warn (_("base register written back, and overlaps "
9383 "second transfer register"));
9385 if (!(inst
.instruction
& V4_STR_BIT
))
9387 /* For an index-register load, the index register must not overlap the
9388 destination (even if not write-back). */
9389 if (inst
.operands
[2].immisreg
9390 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9391 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9392 as_warn (_("index register overlaps transfer register"));
9394 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9395 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9401 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9402 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9403 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9404 || inst
.operands
[1].negative
9405 /* This can arise if the programmer has written
9407 or if they have mistakenly used a register name as the last
9410 It is very difficult to distinguish between these two cases
9411 because "rX" might actually be a label. ie the register
9412 name has been occluded by a symbol of the same name. So we
9413 just generate a general 'bad addressing mode' type error
9414 message and leave it up to the programmer to discover the
9415 true cause and fix their mistake. */
9416 || (inst
.operands
[1].reg
== REG_PC
),
9419 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9420 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9421 _("offset must be zero in ARM encoding"));
9423 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9425 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9426 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9427 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9433 constraint (inst
.operands
[0].reg
% 2 != 0,
9434 _("even register required"));
9435 constraint (inst
.operands
[1].present
9436 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9437 _("can only load two consecutive registers"));
9438 /* If op 1 were present and equal to PC, this function wouldn't
9439 have been called in the first place. */
9440 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9442 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9443 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9446 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9447 which is not a multiple of four is UNPREDICTABLE. */
9449 check_ldr_r15_aligned (void)
9451 constraint (!(inst
.operands
[1].immisreg
)
9452 && (inst
.operands
[0].reg
== REG_PC
9453 && inst
.operands
[1].reg
== REG_PC
9454 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9455 _("ldr to register 15 must be 4-byte aligned"));
9461 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9462 if (!inst
.operands
[1].isreg
)
9463 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9465 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9466 check_ldr_r15_aligned ();
9472 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9474 if (inst
.operands
[1].preind
)
9476 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9477 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9478 _("this instruction requires a post-indexed address"));
9480 inst
.operands
[1].preind
= 0;
9481 inst
.operands
[1].postind
= 1;
9482 inst
.operands
[1].writeback
= 1;
9484 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9485 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9488 /* Halfword and signed-byte load/store operations. */
9493 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9494 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9495 if (!inst
.operands
[1].isreg
)
9496 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9498 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9504 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9506 if (inst
.operands
[1].preind
)
9508 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9509 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9510 _("this instruction requires a post-indexed address"));
9512 inst
.operands
[1].preind
= 0;
9513 inst
.operands
[1].postind
= 1;
9514 inst
.operands
[1].writeback
= 1;
9516 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9517 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9520 /* Co-processor register load/store.
9521 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9525 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9526 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9527 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9533 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9534 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9535 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9536 && !(inst
.instruction
& 0x00400000))
9537 as_tsktsk (_("Rd and Rm should be different in mla"));
9539 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9540 inst
.instruction
|= inst
.operands
[1].reg
;
9541 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9542 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9548 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9549 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9551 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9552 encode_arm_shifter_operand (1);
9555 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9562 top
= (inst
.instruction
& 0x00400000) != 0;
9563 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9564 _(":lower16: not allowed in this instruction"));
9565 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9566 _(":upper16: not allowed in this instruction"));
9567 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9568 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9570 imm
= inst
.relocs
[0].exp
.X_add_number
;
9571 /* The value is in two pieces: 0:11, 16:19. */
9572 inst
.instruction
|= (imm
& 0x00000fff);
9573 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9578 do_vfp_nsyn_mrs (void)
9580 if (inst
.operands
[0].isvec
)
9582 if (inst
.operands
[1].reg
!= 1)
9583 first_error (_("operand 1 must be FPSCR"));
9584 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9585 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9586 do_vfp_nsyn_opcode ("fmstat");
9588 else if (inst
.operands
[1].isvec
)
9589 do_vfp_nsyn_opcode ("fmrx");
9597 do_vfp_nsyn_msr (void)
9599 if (inst
.operands
[0].isvec
)
9600 do_vfp_nsyn_opcode ("fmxr");
9610 unsigned Rt
= inst
.operands
[0].reg
;
9612 if (thumb_mode
&& Rt
== REG_SP
)
9614 inst
.error
= BAD_SP
;
9618 /* MVFR2 is only valid at ARMv8-A. */
9619 if (inst
.operands
[1].reg
== 5)
9620 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9623 /* APSR_ sets isvec. All other refs to PC are illegal. */
9624 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9626 inst
.error
= BAD_PC
;
9630 /* If we get through parsing the register name, we just insert the number
9631 generated into the instruction without further validation. */
9632 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9633 inst
.instruction
|= (Rt
<< 12);
9639 unsigned Rt
= inst
.operands
[1].reg
;
9642 reject_bad_reg (Rt
);
9643 else if (Rt
== REG_PC
)
9645 inst
.error
= BAD_PC
;
9649 /* MVFR2 is only valid for ARMv8-A. */
9650 if (inst
.operands
[0].reg
== 5)
9651 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9654 /* If we get through parsing the register name, we just insert the number
9655 generated into the instruction without further validation. */
9656 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9657 inst
.instruction
|= (Rt
<< 12);
9665 if (do_vfp_nsyn_mrs () == SUCCESS
)
9668 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9669 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9671 if (inst
.operands
[1].isreg
)
9673 br
= inst
.operands
[1].reg
;
9674 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9675 as_bad (_("bad register for mrs"));
9679 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9680 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9682 _("'APSR', 'CPSR' or 'SPSR' expected"));
9683 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9686 inst
.instruction
|= br
;
9689 /* Two possible forms:
9690 "{C|S}PSR_<field>, Rm",
9691 "{C|S}PSR_f, #expression". */
9696 if (do_vfp_nsyn_msr () == SUCCESS
)
9699 inst
.instruction
|= inst
.operands
[0].imm
;
9700 if (inst
.operands
[1].isreg
)
9701 inst
.instruction
|= inst
.operands
[1].reg
;
9704 inst
.instruction
|= INST_IMMEDIATE
;
9705 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9706 inst
.relocs
[0].pc_rel
= 0;
9713 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9715 if (!inst
.operands
[2].present
)
9716 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9717 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9718 inst
.instruction
|= inst
.operands
[1].reg
;
9719 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9721 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9722 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9723 as_tsktsk (_("Rd and Rm should be different in mul"));
9726 /* Long Multiply Parser
9727 UMULL RdLo, RdHi, Rm, Rs
9728 SMULL RdLo, RdHi, Rm, Rs
9729 UMLAL RdLo, RdHi, Rm, Rs
9730 SMLAL RdLo, RdHi, Rm, Rs. */
9735 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9736 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9737 inst
.instruction
|= inst
.operands
[2].reg
;
9738 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9740 /* rdhi and rdlo must be different. */
9741 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9742 as_tsktsk (_("rdhi and rdlo must be different"));
9744 /* rdhi, rdlo and rm must all be different before armv6. */
9745 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9746 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9747 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9748 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9754 if (inst
.operands
[0].present
9755 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9757 /* Architectural NOP hints are CPSR sets with no bits selected. */
9758 inst
.instruction
&= 0xf0000000;
9759 inst
.instruction
|= 0x0320f000;
9760 if (inst
.operands
[0].present
)
9761 inst
.instruction
|= inst
.operands
[0].imm
;
9765 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9766 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9767 Condition defaults to COND_ALWAYS.
9768 Error if Rd, Rn or Rm are R15. */
9773 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9774 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9775 inst
.instruction
|= inst
.operands
[2].reg
;
9776 if (inst
.operands
[3].present
)
9777 encode_arm_shift (3);
9780 /* ARM V6 PKHTB (Argument Parse). */
9785 if (!inst
.operands
[3].present
)
9787 /* If the shift specifier is omitted, turn the instruction
9788 into pkhbt rd, rm, rn. */
9789 inst
.instruction
&= 0xfff00010;
9790 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9791 inst
.instruction
|= inst
.operands
[1].reg
;
9792 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9796 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9797 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9798 inst
.instruction
|= inst
.operands
[2].reg
;
9799 encode_arm_shift (3);
9803 /* ARMv5TE: Preload-Cache
9804 MP Extensions: Preload for write
9808 Syntactically, like LDR with B=1, W=0, L=1. */
9813 constraint (!inst
.operands
[0].isreg
,
9814 _("'[' expected after PLD mnemonic"));
9815 constraint (inst
.operands
[0].postind
,
9816 _("post-indexed expression used in preload instruction"));
9817 constraint (inst
.operands
[0].writeback
,
9818 _("writeback used in preload instruction"));
9819 constraint (!inst
.operands
[0].preind
,
9820 _("unindexed addressing used in preload instruction"));
9821 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9824 /* ARMv7: PLI <addr_mode> */
9828 constraint (!inst
.operands
[0].isreg
,
9829 _("'[' expected after PLI mnemonic"));
9830 constraint (inst
.operands
[0].postind
,
9831 _("post-indexed expression used in preload instruction"));
9832 constraint (inst
.operands
[0].writeback
,
9833 _("writeback used in preload instruction"));
9834 constraint (!inst
.operands
[0].preind
,
9835 _("unindexed addressing used in preload instruction"));
9836 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9837 inst
.instruction
&= ~PRE_INDEX
;
9843 constraint (inst
.operands
[0].writeback
,
9844 _("push/pop do not support {reglist}^"));
9845 inst
.operands
[1] = inst
.operands
[0];
9846 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9847 inst
.operands
[0].isreg
= 1;
9848 inst
.operands
[0].writeback
= 1;
9849 inst
.operands
[0].reg
= REG_SP
;
9850 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9853 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9854 word at the specified address and the following word
9856 Unconditionally executed.
9857 Error if Rn is R15. */
9862 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9863 if (inst
.operands
[0].writeback
)
9864 inst
.instruction
|= WRITE_BACK
;
9867 /* ARM V6 ssat (argument parse). */
9872 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9873 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9874 inst
.instruction
|= inst
.operands
[2].reg
;
9876 if (inst
.operands
[3].present
)
9877 encode_arm_shift (3);
9880 /* ARM V6 usat (argument parse). */
9885 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9886 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9887 inst
.instruction
|= inst
.operands
[2].reg
;
9889 if (inst
.operands
[3].present
)
9890 encode_arm_shift (3);
9893 /* ARM V6 ssat16 (argument parse). */
9898 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9899 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9900 inst
.instruction
|= inst
.operands
[2].reg
;
9906 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9907 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9908 inst
.instruction
|= inst
.operands
[2].reg
;
9911 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9912 preserving the other bits.
9914 setend <endian_specifier>, where <endian_specifier> is either
9920 if (warn_on_deprecated
9921 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9922 as_tsktsk (_("setend use is deprecated for ARMv8"));
9924 if (inst
.operands
[0].imm
)
9925 inst
.instruction
|= 0x200;
9931 unsigned int Rm
= (inst
.operands
[1].present
9932 ? inst
.operands
[1].reg
9933 : inst
.operands
[0].reg
);
9935 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9936 inst
.instruction
|= Rm
;
9937 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9939 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9940 inst
.instruction
|= SHIFT_BY_REG
;
9941 /* PR 12854: Error on extraneous shifts. */
9942 constraint (inst
.operands
[2].shifted
,
9943 _("extraneous shift as part of operand to shift insn"));
9946 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
/* do_smc: SMC (secure monitor call) — leave the #imm as an ARM-specific
   relocation so the fixup machinery range-checks and inserts it later.
   NOTE(review): the `static void do_xxx (void)' headers and braces for
   the three encoders below were dropped by the extraction of this file;
   the code is kept byte-identical.  */
9952 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9953 inst
.relocs
[0].pc_rel
= 0;
/* do_hvc: HVC (hypervisor call) — same scheme, HVC-specific reloc.  */
9959 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9960 inst
.relocs
[0].pc_rel
= 0;
/* do_swi: SVC/SWI — the comment field is filled in by the SWI reloc.  */
9966 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9967 inst
.relocs
[0].pc_rel
= 0;
9973 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9974 _("selected processor does not support SETPAN instruction"));
9976 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9982 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9983 _("selected processor does not support SETPAN instruction"));
9985 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9988 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9989 SMLAxy{cond} Rd,Rm,Rs,Rn
9990 SMLAWy{cond} Rd,Rm,Rs,Rn
9991 Error if any register is R15. */
9996 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9997 inst
.instruction
|= inst
.operands
[1].reg
;
9998 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9999 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
10002 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
10003 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10004 Error if any register is R15.
10005 Warning if Rdlo == Rdhi. */
10010 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10011 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10012 inst
.instruction
|= inst
.operands
[2].reg
;
10013 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
10015 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
10016 as_tsktsk (_("rdhi and rdlo must be different"));
10019 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10020 SMULxy{cond} Rd,Rm,Rs
10021 Error if any register is R15. */
10026 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10027 inst
.instruction
|= inst
.operands
[1].reg
;
10028 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
10031 /* ARM V6 srs (argument parse). The variable fields in the encoding are
10032 the same for both ARM and Thumb-2. */
10039 if (inst
.operands
[0].present
)
10041 reg
= inst
.operands
[0].reg
;
10042 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
10047 inst
.instruction
|= reg
<< 16;
10048 inst
.instruction
|= inst
.operands
[1].imm
;
10049 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
10050 inst
.instruction
|= WRITE_BACK
;
10053 /* ARM V6 strex (argument parse). */
10058 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10059 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10060 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10061 || inst
.operands
[2].negative
10062 /* See comment in do_ldrex(). */
10063 || (inst
.operands
[2].reg
== REG_PC
),
10066 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10067 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10069 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10070 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10071 _("offset must be zero in ARM encoding"));
10073 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10074 inst
.instruction
|= inst
.operands
[1].reg
;
10075 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10076 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
/* Thumb STREXB/STREXH encoder.
   NOTE(review): the extraction dropped the `static void' header, the
   error-message argument (presumably BAD_ADDR_MODE) closing the first
   constraint, and the trailing encode call — verify against upstream
   tc-arm.c.  Code kept byte-identical below.  */
10080 do_t_strexbh (void)
/* Operand 2 must be a bare [Rn]-style address: register base, pre-indexed,
   with no post-index, writeback, register offset, shift, or negation.  */
10082 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
10083 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
10084 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
10085 || inst
.operands
[2].negative
,
/* The status register (op 0) must not overlap value or address regs.  */
10088 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10089 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10097 constraint (inst
.operands
[1].reg
% 2 != 0,
10098 _("even register required"));
10099 constraint (inst
.operands
[2].present
10100 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
10101 _("can only store two consecutive registers"));
10102 /* If op 2 were present and equal to PC, this function wouldn't
10103 have been called in the first place. */
10104 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
10106 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10107 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
10108 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
10111 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10112 inst
.instruction
|= inst
.operands
[1].reg
;
10113 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
10120 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10121 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10129 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
10130 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
10135 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10136 extends it to 32-bits, and adds the result to a value in another
10137 register. You can specify a rotation by 0, 8, 16, or 24 bits
10138 before extracting the 16-bit value.
10139 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10140 Condition defaults to COND_ALWAYS.
10141 Error if any register uses R15. */
10146 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10147 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10148 inst
.instruction
|= inst
.operands
[2].reg
;
10149 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
10154 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10155 Condition defaults to COND_ALWAYS.
10156 Error if any register uses R15. */
10161 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10162 inst
.instruction
|= inst
.operands
[1].reg
;
10163 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
10166 /* VFP instructions. In a logical order: SP variant first, monad
10167 before dyad, arithmetic then move then load/store. */
10170 do_vfp_sp_monadic (void)
10172 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10173 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10177 do_vfp_sp_dyadic (void)
10179 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10180 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10181 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10185 do_vfp_sp_compare_z (void)
10187 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10191 do_vfp_dp_sp_cvt (void)
10193 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10194 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
10198 do_vfp_sp_dp_cvt (void)
10200 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10201 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10205 do_vfp_reg_from_sp (void)
10207 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10208 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
10212 do_vfp_reg2_from_sp2 (void)
10214 constraint (inst
.operands
[2].imm
!= 2,
10215 _("only two consecutive VFP SP registers allowed here"));
10216 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10217 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10218 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10222 do_vfp_sp_from_reg (void)
10224 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10225 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10229 do_vfp_sp2_from_reg2 (void)
10231 constraint (inst
.operands
[0].imm
!= 2,
10232 _("only two consecutive VFP SP registers allowed here"));
10233 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10234 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10235 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10239 do_vfp_sp_ldst (void)
10241 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10242 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10246 do_vfp_dp_ldst (void)
10248 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10249 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10254 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10256 if (inst
.operands
[0].writeback
)
10257 inst
.instruction
|= WRITE_BACK
;
10259 constraint (ldstm_type
!= VFP_LDSTMIA
,
10260 _("this addressing mode requires base-register writeback"));
10261 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10262 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10263 inst
.instruction
|= inst
.operands
[1].imm
;
10267 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10271 if (inst
.operands
[0].writeback
)
10272 inst
.instruction
|= WRITE_BACK
;
10274 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10275 _("this addressing mode requires base-register writeback"));
10277 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10278 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10280 count
= inst
.operands
[1].imm
<< 1;
10281 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10284 inst
.instruction
|= count
;
10288 do_vfp_sp_ldstmia (void)
10290 vfp_sp_ldstm (VFP_LDSTMIA
);
10294 do_vfp_sp_ldstmdb (void)
10296 vfp_sp_ldstm (VFP_LDSTMDB
);
10300 do_vfp_dp_ldstmia (void)
10302 vfp_dp_ldstm (VFP_LDSTMIA
);
10306 do_vfp_dp_ldstmdb (void)
10308 vfp_dp_ldstm (VFP_LDSTMDB
);
10312 do_vfp_xp_ldstmia (void)
10314 vfp_dp_ldstm (VFP_LDSTMIAX
);
10318 do_vfp_xp_ldstmdb (void)
10320 vfp_dp_ldstm (VFP_LDSTMDBX
);
10324 do_vfp_dp_rd_rm (void)
10326 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10327 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10331 do_vfp_dp_rn_rd (void)
10333 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10334 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10338 do_vfp_dp_rd_rn (void)
10340 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10341 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10345 do_vfp_dp_rd_rn_rm (void)
10347 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10348 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10349 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10353 do_vfp_dp_rd (void)
10355 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10359 do_vfp_dp_rm_rd_rn (void)
10361 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10362 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10363 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10366 /* VFPv3 instructions. */
10368 do_vfp_sp_const (void)
10370 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10371 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10372 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10376 do_vfp_dp_const (void)
10378 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10379 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10380 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10384 vfp_conv (int srcsize
)
10386 int immbits
= srcsize
- inst
.operands
[1].imm
;
10388 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10390 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10391 i.e. immbits must be in range 0 - 16. */
10392 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10395 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10397 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10398 i.e. immbits must be in range 0 - 31. */
10399 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10403 inst
.instruction
|= (immbits
& 1) << 5;
10404 inst
.instruction
|= (immbits
>> 1);
10408 do_vfp_sp_conv_16 (void)
10410 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10415 do_vfp_dp_conv_16 (void)
10417 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10422 do_vfp_sp_conv_32 (void)
10424 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10429 do_vfp_dp_conv_32 (void)
10431 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10435 /* FPA instructions. Also in a logical order. */
10440 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10441 inst
.instruction
|= inst
.operands
[1].reg
;
10445 do_fpa_ldmstm (void)
10447 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10448 switch (inst
.operands
[1].imm
)
10450 case 1: inst
.instruction
|= CP_T_X
; break;
10451 case 2: inst
.instruction
|= CP_T_Y
; break;
10452 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10457 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10459 /* The instruction specified "ea" or "fd", so we can only accept
10460 [Rn]{!}. The instruction does not really support stacking or
10461 unstacking, so we have to emulate these by setting appropriate
10462 bits and offsets. */
10463 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10464 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10465 _("this instruction does not support indexing"));
10467 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10468 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10470 if (!(inst
.instruction
& INDEX_UP
))
10471 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10473 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10475 inst
.operands
[2].preind
= 0;
10476 inst
.operands
[2].postind
= 1;
10480 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10483 /* iWMMXt instructions: strictly in alphabetical order. */
10486 do_iwmmxt_tandorc (void)
10488 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10492 do_iwmmxt_textrc (void)
10494 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10495 inst
.instruction
|= inst
.operands
[1].imm
;
10499 do_iwmmxt_textrm (void)
10501 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10502 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10503 inst
.instruction
|= inst
.operands
[2].imm
;
10507 do_iwmmxt_tinsr (void)
10509 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10510 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10511 inst
.instruction
|= inst
.operands
[2].imm
;
10515 do_iwmmxt_tmia (void)
10517 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10518 inst
.instruction
|= inst
.operands
[1].reg
;
10519 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10523 do_iwmmxt_waligni (void)
10525 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10526 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10527 inst
.instruction
|= inst
.operands
[2].reg
;
10528 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10532 do_iwmmxt_wmerge (void)
10534 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10535 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10536 inst
.instruction
|= inst
.operands
[2].reg
;
10537 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10541 do_iwmmxt_wmov (void)
10543 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10544 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10545 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10546 inst
.instruction
|= inst
.operands
[1].reg
;
10550 do_iwmmxt_wldstbh (void)
10553 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10555 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10557 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10558 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10562 do_iwmmxt_wldstw (void)
10564 /* RIWR_RIWC clears .isreg for a control register. */
10565 if (!inst
.operands
[0].isreg
)
10567 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10568 inst
.instruction
|= 0xf0000000;
10571 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10572 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10576 do_iwmmxt_wldstd (void)
10578 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10579 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10580 && inst
.operands
[1].immisreg
)
10582 inst
.instruction
&= ~0x1a000ff;
10583 inst
.instruction
|= (0xfU
<< 28);
10584 if (inst
.operands
[1].preind
)
10585 inst
.instruction
|= PRE_INDEX
;
10586 if (!inst
.operands
[1].negative
)
10587 inst
.instruction
|= INDEX_UP
;
10588 if (inst
.operands
[1].writeback
)
10589 inst
.instruction
|= WRITE_BACK
;
10590 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10591 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10592 inst
.instruction
|= inst
.operands
[1].imm
;
10595 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10599 do_iwmmxt_wshufh (void)
10601 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10602 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10603 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10604 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10608 do_iwmmxt_wzero (void)
10610 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10611 inst
.instruction
|= inst
.operands
[0].reg
;
10612 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10613 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10617 do_iwmmxt_wrwrwr_or_imm5 (void)
10619 if (inst
.operands
[2].isreg
)
10622 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10623 _("immediate operand requires iWMMXt2"));
10625 if (inst
.operands
[2].imm
== 0)
10627 switch ((inst
.instruction
>> 20) & 0xf)
10633 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10634 inst
.operands
[2].imm
= 16;
10635 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10641 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10642 inst
.operands
[2].imm
= 32;
10643 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10650 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10652 wrn
= (inst
.instruction
>> 16) & 0xf;
10653 inst
.instruction
&= 0xff0fff0f;
10654 inst
.instruction
|= wrn
;
10655 /* Bail out here; the instruction is now assembled. */
10660 /* Map 32 -> 0, etc. */
10661 inst
.operands
[2].imm
&= 0x1f;
10662 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10666 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10667 operations first, then control, shift, and load/store. */
10669 /* Insns like "foo X,Y,Z". */
10672 do_mav_triple (void)
10674 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10675 inst
.instruction
|= inst
.operands
[1].reg
;
10676 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10679 /* Insns like "foo W,X,Y,Z".
10680 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10685 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10686 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10687 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10688 inst
.instruction
|= inst
.operands
[3].reg
;
10691 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10693 do_mav_dspsc (void)
10695 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10698 /* Maverick shift immediate instructions.
10699 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10700 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10703 do_mav_shift (void)
10705 int imm
= inst
.operands
[2].imm
;
10707 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10708 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10710 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10711 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10712 Bit 4 should be 0. */
10713 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10715 inst
.instruction
|= imm
;
10718 /* XScale instructions. Also sorted arithmetic before move. */
10720 /* Xscale multiply-accumulate (argument parse)
10723 MIAxycc acc0,Rm,Rs. */
10728 inst
.instruction
|= inst
.operands
[1].reg
;
10729 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10732 /* Xscale move-accumulator-register (argument parse)
10734 MARcc acc0,RdLo,RdHi. */
10739 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10740 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10743 /* Xscale move-register-accumulator (argument parse)
10745 MRAcc RdLo,RdHi,acc0. */
10750 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10751 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10752 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10755 /* Encoding functions relevant only to Thumb. */
10757 /* inst.operands[i] is a shifted-register operand; encode
10758 it into inst.instruction in the format used by Thumb32. */
10761 encode_thumb32_shifted_operand (int i
)
10763 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10764 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10766 constraint (inst
.operands
[i
].immisreg
,
10767 _("shift by register not allowed in thumb mode"));
10768 inst
.instruction
|= inst
.operands
[i
].reg
;
10769 if (shift
== SHIFT_RRX
)
10770 inst
.instruction
|= SHIFT_ROR
<< 4;
10773 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10774 _("expression too complex"));
10776 constraint (value
> 32
10777 || (value
== 32 && (shift
== SHIFT_LSL
10778 || shift
== SHIFT_ROR
)),
10779 _("shift expression is too large"));
10783 else if (value
== 32)
10786 inst
.instruction
|= shift
<< 4;
10787 inst
.instruction
|= (value
& 0x1c) << 10;
10788 inst
.instruction
|= (value
& 0x03) << 6;
10793 /* inst.operands[i] was set up by parse_address. Encode it into a
10794 Thumb32 format load or store instruction. Reject forms that cannot
10795 be used with such instructions. If is_t is true, reject forms that
10796 cannot be used with a T instruction; if is_d is true, reject forms
10797 that cannot be used with a D instruction. If it is a store insn,
10798 reject PC in Rn. */
10801 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10803 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10805 constraint (!inst
.operands
[i
].isreg
,
10806 _("Instruction does not support =N addresses"));
10808 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10809 if (inst
.operands
[i
].immisreg
)
10811 constraint (is_pc
, BAD_PC_ADDRESSING
);
10812 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10813 constraint (inst
.operands
[i
].negative
,
10814 _("Thumb does not support negative register indexing"));
10815 constraint (inst
.operands
[i
].postind
,
10816 _("Thumb does not support register post-indexing"));
10817 constraint (inst
.operands
[i
].writeback
,
10818 _("Thumb does not support register indexing with writeback"));
10819 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10820 _("Thumb supports only LSL in shifted register indexing"));
10822 inst
.instruction
|= inst
.operands
[i
].imm
;
10823 if (inst
.operands
[i
].shifted
)
10825 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10826 _("expression too complex"));
10827 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10828 || inst
.relocs
[0].exp
.X_add_number
> 3,
10829 _("shift out of range"));
10830 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10832 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10834 else if (inst
.operands
[i
].preind
)
10836 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10837 constraint (is_t
&& inst
.operands
[i
].writeback
,
10838 _("cannot use writeback with this instruction"));
10839 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10840 BAD_PC_ADDRESSING
);
10844 inst
.instruction
|= 0x01000000;
10845 if (inst
.operands
[i
].writeback
)
10846 inst
.instruction
|= 0x00200000;
10850 inst
.instruction
|= 0x00000c00;
10851 if (inst
.operands
[i
].writeback
)
10852 inst
.instruction
|= 0x00000100;
10854 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10856 else if (inst
.operands
[i
].postind
)
10858 gas_assert (inst
.operands
[i
].writeback
);
10859 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10860 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10863 inst
.instruction
|= 0x00200000;
10865 inst
.instruction
|= 0x00000900;
10866 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10868 else /* unindexed - only for coprocessor */
10869 inst
.error
= _("instruction does not accept unindexed addressing");
10872 /* Table of Thumb instructions which exist in both 16- and 32-bit
10873 encodings (the latter only in post-V6T2 cores). The index is the
10874 value used in the insns table below. When there is more than one
10875 possible 16-bit encoding for the instruction, this table always
10877 Also contains several pseudo-instructions used during relaxation. */
10878 #define T16_32_TAB \
10879 X(_adc, 4140, eb400000), \
10880 X(_adcs, 4140, eb500000), \
10881 X(_add, 1c00, eb000000), \
10882 X(_adds, 1c00, eb100000), \
10883 X(_addi, 0000, f1000000), \
10884 X(_addis, 0000, f1100000), \
10885 X(_add_pc,000f, f20f0000), \
10886 X(_add_sp,000d, f10d0000), \
10887 X(_adr, 000f, f20f0000), \
10888 X(_and, 4000, ea000000), \
10889 X(_ands, 4000, ea100000), \
10890 X(_asr, 1000, fa40f000), \
10891 X(_asrs, 1000, fa50f000), \
10892 X(_b, e000, f000b000), \
10893 X(_bcond, d000, f0008000), \
10894 X(_bf, 0000, f040e001), \
10895 X(_bfcsel,0000, f000e001), \
10896 X(_bfx, 0000, f060e001), \
10897 X(_bfl, 0000, f000c001), \
10898 X(_bflx, 0000, f070e001), \
10899 X(_bic, 4380, ea200000), \
10900 X(_bics, 4380, ea300000), \
10901 X(_cmn, 42c0, eb100f00), \
10902 X(_cmp, 2800, ebb00f00), \
10903 X(_cpsie, b660, f3af8400), \
10904 X(_cpsid, b670, f3af8600), \
10905 X(_cpy, 4600, ea4f0000), \
10906 X(_dec_sp,80dd, f1ad0d00), \
10907 X(_dls, 0000, f040e001), \
10908 X(_eor, 4040, ea800000), \
10909 X(_eors, 4040, ea900000), \
10910 X(_inc_sp,00dd, f10d0d00), \
10911 X(_ldmia, c800, e8900000), \
10912 X(_ldr, 6800, f8500000), \
10913 X(_ldrb, 7800, f8100000), \
10914 X(_ldrh, 8800, f8300000), \
10915 X(_ldrsb, 5600, f9100000), \
10916 X(_ldrsh, 5e00, f9300000), \
10917 X(_ldr_pc,4800, f85f0000), \
10918 X(_ldr_pc2,4800, f85f0000), \
10919 X(_ldr_sp,9800, f85d0000), \
10920 X(_le, 0000, f00fc001), \
10921 X(_lsl, 0000, fa00f000), \
10922 X(_lsls, 0000, fa10f000), \
10923 X(_lsr, 0800, fa20f000), \
10924 X(_lsrs, 0800, fa30f000), \
10925 X(_mov, 2000, ea4f0000), \
10926 X(_movs, 2000, ea5f0000), \
10927 X(_mul, 4340, fb00f000), \
10928 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10929 X(_mvn, 43c0, ea6f0000), \
10930 X(_mvns, 43c0, ea7f0000), \
10931 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10932 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10933 X(_orr, 4300, ea400000), \
10934 X(_orrs, 4300, ea500000), \
10935 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10936 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10937 X(_rev, ba00, fa90f080), \
10938 X(_rev16, ba40, fa90f090), \
10939 X(_revsh, bac0, fa90f0b0), \
10940 X(_ror, 41c0, fa60f000), \
10941 X(_rors, 41c0, fa70f000), \
10942 X(_sbc, 4180, eb600000), \
10943 X(_sbcs, 4180, eb700000), \
10944 X(_stmia, c000, e8800000), \
10945 X(_str, 6000, f8400000), \
10946 X(_strb, 7000, f8000000), \
10947 X(_strh, 8000, f8200000), \
10948 X(_str_sp,9000, f84d0000), \
10949 X(_sub, 1e00, eba00000), \
10950 X(_subs, 1e00, ebb00000), \
10951 X(_subi, 8000, f1a00000), \
10952 X(_subis, 8000, f1b00000), \
10953 X(_sxtb, b240, fa4ff080), \
10954 X(_sxth, b200, fa0ff080), \
10955 X(_tst, 4200, ea100f00), \
10956 X(_uxtb, b2c0, fa5ff080), \
10957 X(_uxth, b280, fa1ff080), \
10958 X(_nop, bf00, f3af8000), \
10959 X(_yield, bf10, f3af8001), \
10960 X(_wfe, bf20, f3af8002), \
10961 X(_wfi, bf30, f3af8003), \
10962 X(_wls, 0000, f040c001), \
10963 X(_sev, bf40, f3af8004), \
10964 X(_sevl, bf50, f3af8005), \
10965 X(_udf, de00, f7f0a000)
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
/* NOTE(review): the extraction dropped the '#undef X' here; without it the
   following '#define X(a,b,c) 0x##b' is an invalid macro redefinition.  */
#undef X
10974 #define X(a,b,c) 0x##b
10975 static const unsigned short thumb_op16
[] = { T16_32_TAB
};
10976 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10979 #define X(a,b,c) 0x##c
10980 static const unsigned int thumb_op32
[] = { T16_32_TAB
};
10981 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10982 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10986 /* Thumb instruction encoders, in alphabetical order. */
10988 /* ADDW or SUBW. */
10991 do_t_add_sub_w (void)
10995 Rd
= inst
.operands
[0].reg
;
10996 Rn
= inst
.operands
[1].reg
;
10998 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10999 is the SP-{plus,minus}-immediate form of the instruction. */
11001 constraint (Rd
== REG_PC
, BAD_PC
);
11003 reject_bad_reg (Rd
);
11005 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
11006 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11009 /* Parse an add or subtract instruction. We get here with inst.instruction
11010 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
11013 do_t_add_sub (void)
11017 Rd
= inst
.operands
[0].reg
;
11018 Rs
= (inst
.operands
[1].present
11019 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11020 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11023 set_pred_insn_type_last ();
11025 if (unified_syntax
)
11028 bfd_boolean narrow
;
11031 flags
= (inst
.instruction
== T_MNEM_adds
11032 || inst
.instruction
== T_MNEM_subs
);
11034 narrow
= !in_pred_block ();
11036 narrow
= in_pred_block ();
11037 if (!inst
.operands
[2].isreg
)
11041 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11042 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11044 add
= (inst
.instruction
== T_MNEM_add
11045 || inst
.instruction
== T_MNEM_adds
);
11047 if (inst
.size_req
!= 4)
11049 /* Attempt to use a narrow opcode, with relaxation if
11051 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
11052 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
11053 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
11054 opcode
= T_MNEM_add_sp
;
11055 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
11056 opcode
= T_MNEM_add_pc
;
11057 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
11060 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
11062 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
11066 inst
.instruction
= THUMB_OP16(opcode
);
11067 inst
.instruction
|= (Rd
<< 4) | Rs
;
11068 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
11069 || (inst
.relocs
[0].type
11070 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
11072 if (inst
.size_req
== 2)
11073 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11075 inst
.relax
= opcode
;
11079 constraint (inst
.size_req
== 2, BAD_HIREG
);
11081 if (inst
.size_req
== 4
11082 || (inst
.size_req
!= 2 && !opcode
))
11084 constraint ((inst
.relocs
[0].type
11085 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
11086 && (inst
.relocs
[0].type
11087 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
11088 THUMB1_RELOC_ONLY
);
11091 constraint (add
, BAD_PC
);
11092 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
11093 _("only SUBS PC, LR, #const allowed"));
11094 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
11095 _("expression too complex"));
11096 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
11097 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
11098 _("immediate value out of range"));
11099 inst
.instruction
= T2_SUBS_PC_LR
11100 | inst
.relocs
[0].exp
.X_add_number
;
11101 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
11104 else if (Rs
== REG_PC
)
11106 /* Always use addw/subw. */
11107 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
11108 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
11112 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11113 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
11116 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11118 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
11120 inst
.instruction
|= Rd
<< 8;
11121 inst
.instruction
|= Rs
<< 16;
11126 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
11127 unsigned int shift
= inst
.operands
[2].shift_kind
;
11129 Rn
= inst
.operands
[2].reg
;
11130 /* See if we can do this with a 16-bit instruction. */
11131 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
11133 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11138 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
11139 || inst
.instruction
== T_MNEM_add
)
11141 : T_OPCODE_SUB_R3
);
11142 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11146 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
11148 /* Thumb-1 cores (except v6-M) require at least one high
11149 register in a narrow non flag setting add. */
11150 if (Rd
> 7 || Rn
> 7
11151 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
11152 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
11159 inst
.instruction
= T_OPCODE_ADD_HI
;
11160 inst
.instruction
|= (Rd
& 8) << 4;
11161 inst
.instruction
|= (Rd
& 7);
11162 inst
.instruction
|= Rn
<< 3;
11168 constraint (Rd
== REG_PC
, BAD_PC
);
11169 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
11170 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
11171 constraint (Rs
== REG_PC
, BAD_PC
);
11172 reject_bad_reg (Rn
);
11174 /* If we get here, it can't be done in 16 bits. */
11175 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
11176 _("shift must be constant"));
11177 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11178 inst
.instruction
|= Rd
<< 8;
11179 inst
.instruction
|= Rs
<< 16;
11180 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
11181 _("shift value over 3 not allowed in thumb mode"));
11182 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
11183 _("only LSL shift allowed in thumb mode"));
11184 encode_thumb32_shifted_operand (2);
11189 constraint (inst
.instruction
== T_MNEM_adds
11190 || inst
.instruction
== T_MNEM_subs
,
11193 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
11195 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
11196 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
11199 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11200 ? 0x0000 : 0x8000);
11201 inst
.instruction
|= (Rd
<< 4) | Rs
;
11202 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11206 Rn
= inst
.operands
[2].reg
;
11207 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
11209 /* We now have Rd, Rs, and Rn set to registers. */
11210 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
11212 /* Can't do this for SUB. */
11213 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
11214 inst
.instruction
= T_OPCODE_ADD_HI
;
11215 inst
.instruction
|= (Rd
& 8) << 4;
11216 inst
.instruction
|= (Rd
& 7);
11218 inst
.instruction
|= Rn
<< 3;
11220 inst
.instruction
|= Rs
<< 3;
11222 constraint (1, _("dest must overlap one source register"));
11226 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11227 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11228 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11238 Rd
= inst
.operands
[0].reg
;
11239 reject_bad_reg (Rd
);
11241 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11243 /* Defer to section relaxation. */
11244 inst
.relax
= inst
.instruction
;
11245 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11246 inst
.instruction
|= Rd
<< 4;
11248 else if (unified_syntax
&& inst
.size_req
!= 2)
11250 /* Generate a 32-bit opcode. */
11251 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11252 inst
.instruction
|= Rd
<< 8;
11253 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11254 inst
.relocs
[0].pc_rel
= 1;
11258 /* Generate a 16-bit opcode. */
11259 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11260 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11261 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11262 inst
.relocs
[0].pc_rel
= 1;
11263 inst
.instruction
|= Rd
<< 4;
11266 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11267 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11268 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11269 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11270 inst
.relocs
[0].exp
.X_add_number
+= 1;
11273 /* Arithmetic instructions for which there is just one 16-bit
11274 instruction encoding, and it allows only two low registers.
11275 For maximal compatibility with ARM syntax, we allow three register
11276 operands even when Thumb-32 instructions are not available, as long
11277 as the first two are identical. For instance, both "sbc r0,r1" and
11278 "sbc r0,r0,r1" are allowed. */
11284 Rd
= inst
.operands
[0].reg
;
11285 Rs
= (inst
.operands
[1].present
11286 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11287 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11288 Rn
= inst
.operands
[2].reg
;
11290 reject_bad_reg (Rd
);
11291 reject_bad_reg (Rs
);
11292 if (inst
.operands
[2].isreg
)
11293 reject_bad_reg (Rn
);
11295 if (unified_syntax
)
11297 if (!inst
.operands
[2].isreg
)
11299 /* For an immediate, we always generate a 32-bit opcode;
11300 section relaxation will shrink it later if possible. */
11301 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11302 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11303 inst
.instruction
|= Rd
<< 8;
11304 inst
.instruction
|= Rs
<< 16;
11305 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11309 bfd_boolean narrow
;
11311 /* See if we can do this with a 16-bit instruction. */
11312 if (THUMB_SETS_FLAGS (inst
.instruction
))
11313 narrow
= !in_pred_block ();
11315 narrow
= in_pred_block ();
11317 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11319 if (inst
.operands
[2].shifted
)
11321 if (inst
.size_req
== 4)
11327 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11328 inst
.instruction
|= Rd
;
11329 inst
.instruction
|= Rn
<< 3;
11333 /* If we get here, it can't be done in 16 bits. */
11334 constraint (inst
.operands
[2].shifted
11335 && inst
.operands
[2].immisreg
,
11336 _("shift must be constant"));
11337 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11338 inst
.instruction
|= Rd
<< 8;
11339 inst
.instruction
|= Rs
<< 16;
11340 encode_thumb32_shifted_operand (2);
11345 /* On its face this is a lie - the instruction does set the
11346 flags. However, the only supported mnemonic in this mode
11347 says it doesn't. */
11348 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11350 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11351 _("unshifted register required"));
11352 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11353 constraint (Rd
!= Rs
,
11354 _("dest and source1 must be the same register"));
11356 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11357 inst
.instruction
|= Rd
;
11358 inst
.instruction
|= Rn
<< 3;
11362 /* Similarly, but for instructions where the arithmetic operation is
11363 commutative, so we can allow either of them to be different from
11364 the destination operand in a 16-bit instruction. For instance, all
11365 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11372 Rd
= inst
.operands
[0].reg
;
11373 Rs
= (inst
.operands
[1].present
11374 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11375 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11376 Rn
= inst
.operands
[2].reg
;
11378 reject_bad_reg (Rd
);
11379 reject_bad_reg (Rs
);
11380 if (inst
.operands
[2].isreg
)
11381 reject_bad_reg (Rn
);
11383 if (unified_syntax
)
11385 if (!inst
.operands
[2].isreg
)
11387 /* For an immediate, we always generate a 32-bit opcode;
11388 section relaxation will shrink it later if possible. */
11389 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11390 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11391 inst
.instruction
|= Rd
<< 8;
11392 inst
.instruction
|= Rs
<< 16;
11393 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11397 bfd_boolean narrow
;
11399 /* See if we can do this with a 16-bit instruction. */
11400 if (THUMB_SETS_FLAGS (inst
.instruction
))
11401 narrow
= !in_pred_block ();
11403 narrow
= in_pred_block ();
11405 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11407 if (inst
.operands
[2].shifted
)
11409 if (inst
.size_req
== 4)
11416 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11417 inst
.instruction
|= Rd
;
11418 inst
.instruction
|= Rn
<< 3;
11423 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11424 inst
.instruction
|= Rd
;
11425 inst
.instruction
|= Rs
<< 3;
11430 /* If we get here, it can't be done in 16 bits. */
11431 constraint (inst
.operands
[2].shifted
11432 && inst
.operands
[2].immisreg
,
11433 _("shift must be constant"));
11434 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11435 inst
.instruction
|= Rd
<< 8;
11436 inst
.instruction
|= Rs
<< 16;
11437 encode_thumb32_shifted_operand (2);
11442 /* On its face this is a lie - the instruction does set the
11443 flags. However, the only supported mnemonic in this mode
11444 says it doesn't. */
11445 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11447 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11448 _("unshifted register required"));
11449 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11451 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11452 inst
.instruction
|= Rd
;
11455 inst
.instruction
|= Rn
<< 3;
11457 inst
.instruction
|= Rs
<< 3;
11459 constraint (1, _("dest must overlap one source register"));
11467 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11468 constraint (msb
> 32, _("bit-field extends past end of register"));
11469 /* The instruction encoding stores the LSB and MSB,
11470 not the LSB and width. */
11471 Rd
= inst
.operands
[0].reg
;
11472 reject_bad_reg (Rd
);
11473 inst
.instruction
|= Rd
<< 8;
11474 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11475 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11476 inst
.instruction
|= msb
- 1;
11485 Rd
= inst
.operands
[0].reg
;
11486 reject_bad_reg (Rd
);
11488 /* #0 in second position is alternative syntax for bfc, which is
11489 the same instruction but with REG_PC in the Rm field. */
11490 if (!inst
.operands
[1].isreg
)
11494 Rn
= inst
.operands
[1].reg
;
11495 reject_bad_reg (Rn
);
11498 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11499 constraint (msb
> 32, _("bit-field extends past end of register"));
11500 /* The instruction encoding stores the LSB and MSB,
11501 not the LSB and width. */
11502 inst
.instruction
|= Rd
<< 8;
11503 inst
.instruction
|= Rn
<< 16;
11504 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11505 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11506 inst
.instruction
|= msb
- 1;
11514 Rd
= inst
.operands
[0].reg
;
11515 Rn
= inst
.operands
[1].reg
;
11517 reject_bad_reg (Rd
);
11518 reject_bad_reg (Rn
);
11520 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11521 _("bit-field extends past end of register"));
11522 inst
.instruction
|= Rd
<< 8;
11523 inst
.instruction
|= Rn
<< 16;
11524 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11525 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11526 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11529 /* ARM V5 Thumb BLX (argument parse)
11530 BLX <target_addr> which is BLX(1)
11531 BLX <Rm> which is BLX(2)
11532 Unfortunately, there are two different opcodes for this mnemonic.
11533 So, the insns[].value is not used, and the code here zaps values
11534 into inst.instruction.
11536 ??? How to take advantage of the additional two bits of displacement
11537 available in Thumb32 mode? Need new relocation? */
11542 set_pred_insn_type_last ();
11544 if (inst
.operands
[0].isreg
)
11546 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11547 /* We have a register, so this is BLX(2). */
11548 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11552 /* No register. This must be BLX(1). */
11553 inst
.instruction
= 0xf000e800;
11554 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11563 bfd_reloc_code_real_type reloc
;
11566 set_pred_insn_type (IF_INSIDE_IT_LAST_INSN
);
11568 if (in_pred_block ())
11570 /* Conditional branches inside IT blocks are encoded as unconditional
11572 cond
= COND_ALWAYS
;
11577 if (cond
!= COND_ALWAYS
)
11578 opcode
= T_MNEM_bcond
;
11580 opcode
= inst
.instruction
;
11583 && (inst
.size_req
== 4
11584 || (inst
.size_req
!= 2
11585 && (inst
.operands
[0].hasreloc
11586 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11588 inst
.instruction
= THUMB_OP32(opcode
);
11589 if (cond
== COND_ALWAYS
)
11590 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11593 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11594 _("selected architecture does not support "
11595 "wide conditional branch instruction"));
11597 gas_assert (cond
!= 0xF);
11598 inst
.instruction
|= cond
<< 22;
11599 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11604 inst
.instruction
= THUMB_OP16(opcode
);
11605 if (cond
== COND_ALWAYS
)
11606 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11609 inst
.instruction
|= cond
<< 8;
11610 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11612 /* Allow section relaxation. */
11613 if (unified_syntax
&& inst
.size_req
!= 2)
11614 inst
.relax
= opcode
;
11616 inst
.relocs
[0].type
= reloc
;
11617 inst
.relocs
[0].pc_rel
= 1;
11620 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11621 between the two is the maximum immediate allowed - which is passed in
11624 do_t_bkpt_hlt1 (int range
)
11626 constraint (inst
.cond
!= COND_ALWAYS
,
11627 _("instruction is always unconditional"));
11628 if (inst
.operands
[0].present
)
11630 constraint (inst
.operands
[0].imm
> range
,
11631 _("immediate value out of range"));
11632 inst
.instruction
|= inst
.operands
[0].imm
;
11635 set_pred_insn_type (NEUTRAL_IT_INSN
);
/* HLT: immediate range 0-63.  NOTE(review): enclosing wrapper signatures
   were dropped by extraction; restored per surrounding structure.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}

/* BKPT: immediate range 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
11651 do_t_branch23 (void)
11653 set_pred_insn_type_last ();
11654 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11656 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11657 this file. We used to simply ignore the PLT reloc type here --
11658 the branch encoding is now needed to deal with TLSCALL relocs.
11659 So if we see a PLT reloc now, put it back to how it used to be to
11660 keep the preexisting behaviour. */
11661 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11662 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11664 #if defined(OBJ_COFF)
11665 /* If the destination of the branch is a defined symbol which does not have
11666 the THUMB_FUNC attribute, then we must be calling a function which has
11667 the (interfacearm) attribute. We look for the Thumb entry point to that
11668 function and change the branch to refer to that function instead. */
11669 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11670 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11671 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11672 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11673 inst
.relocs
[0].exp
.X_add_symbol
11674 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11681 set_pred_insn_type_last ();
11682 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11683 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11684 should cause the alignment to be checked once it is known. This is
11685 because BX PC only works if the instruction is word aligned. */
11693 set_pred_insn_type_last ();
11694 Rm
= inst
.operands
[0].reg
;
11695 reject_bad_reg (Rm
);
11696 inst
.instruction
|= Rm
<< 16;
11705 Rd
= inst
.operands
[0].reg
;
11706 Rm
= inst
.operands
[1].reg
;
11708 reject_bad_reg (Rd
);
11709 reject_bad_reg (Rm
);
11711 inst
.instruction
|= Rd
<< 8;
11712 inst
.instruction
|= Rm
<< 16;
11713 inst
.instruction
|= Rm
;
11719 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11725 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11726 inst
.instruction
|= inst
.operands
[0].imm
;
11732 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11734 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11735 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11737 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11738 inst
.instruction
= 0xf3af8000;
11739 inst
.instruction
|= imod
<< 9;
11740 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11741 if (inst
.operands
[1].present
)
11742 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11746 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11747 && (inst
.operands
[0].imm
& 4),
11748 _("selected processor does not support 'A' form "
11749 "of this instruction"));
11750 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11751 _("Thumb does not support the 2-argument "
11752 "form of this instruction"));
11753 inst
.instruction
|= inst
.operands
[0].imm
;
11757 /* THUMB CPY instruction (argument parse). */
11762 if (inst
.size_req
== 4)
11764 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11765 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11766 inst
.instruction
|= inst
.operands
[1].reg
;
11770 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11771 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11772 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11779 set_pred_insn_type (OUTSIDE_PRED_INSN
);
11780 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11781 inst
.instruction
|= inst
.operands
[0].reg
;
11782 inst
.relocs
[0].pc_rel
= 1;
11783 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11789 inst
.instruction
|= inst
.operands
[0].imm
;
11795 unsigned Rd
, Rn
, Rm
;
11797 Rd
= inst
.operands
[0].reg
;
11798 Rn
= (inst
.operands
[1].present
11799 ? inst
.operands
[1].reg
: Rd
);
11800 Rm
= inst
.operands
[2].reg
;
11802 reject_bad_reg (Rd
);
11803 reject_bad_reg (Rn
);
11804 reject_bad_reg (Rm
);
11806 inst
.instruction
|= Rd
<< 8;
11807 inst
.instruction
|= Rn
<< 16;
11808 inst
.instruction
|= Rm
;
11814 if (unified_syntax
&& inst
.size_req
== 4)
11815 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11817 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11823 unsigned int cond
= inst
.operands
[0].imm
;
11825 set_pred_insn_type (IT_INSN
);
11826 now_pred
.mask
= (inst
.instruction
& 0xf) | 0x10;
11827 now_pred
.cc
= cond
;
11828 now_pred
.warn_deprecated
= FALSE
;
11829 now_pred
.type
= SCALAR_PRED
;
11831 /* If the condition is a negative condition, invert the mask. */
11832 if ((cond
& 0x1) == 0x0)
11834 unsigned int mask
= inst
.instruction
& 0x000f;
11836 if ((mask
& 0x7) == 0)
11838 /* No conversion needed. */
11839 now_pred
.block_length
= 1;
11841 else if ((mask
& 0x3) == 0)
11844 now_pred
.block_length
= 2;
11846 else if ((mask
& 0x1) == 0)
11849 now_pred
.block_length
= 3;
11854 now_pred
.block_length
= 4;
11857 inst
.instruction
&= 0xfff0;
11858 inst
.instruction
|= mask
;
11861 inst
.instruction
|= cond
<< 4;
11867 /* We are dealing with a vector predicated block. */
11868 set_pred_insn_type (VPT_INSN
);
11870 now_pred
.mask
= ((inst
.instruction
& 0x00400000) >> 19)
11871 | ((inst
.instruction
& 0xe000) >> 13);
11872 now_pred
.warn_deprecated
= FALSE
;
11873 now_pred
.type
= VECTOR_PRED
;
11876 /* Helper function used for both push/pop and ldm/stm. */
11878 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11879 bfd_boolean writeback
)
11881 bfd_boolean load
, store
;
11883 gas_assert (base
!= -1 || !do_io
);
11884 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11885 store
= do_io
&& !load
;
11887 if (mask
& (1 << 13))
11888 inst
.error
= _("SP not allowed in register list");
11890 if (do_io
&& (mask
& (1 << base
)) != 0
11892 inst
.error
= _("having the base register in the register list when "
11893 "using write back is UNPREDICTABLE");
11897 if (mask
& (1 << 15))
11899 if (mask
& (1 << 14))
11900 inst
.error
= _("LR and PC should not both be in register list");
11902 set_pred_insn_type_last ();
11907 if (mask
& (1 << 15))
11908 inst
.error
= _("PC not allowed in register list");
11911 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11913 /* Single register transfers implemented as str/ldr. */
11916 if (inst
.instruction
& (1 << 23))
11917 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11919 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11923 if (inst
.instruction
& (1 << 23))
11924 inst
.instruction
= 0x00800000; /* ia -> [base] */
11926 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11929 inst
.instruction
|= 0xf8400000;
11931 inst
.instruction
|= 0x00100000;
11933 mask
= ffs (mask
) - 1;
11936 else if (writeback
)
11937 inst
.instruction
|= WRITE_BACK
;
11939 inst
.instruction
|= mask
;
11941 inst
.instruction
|= base
<< 16;
11947 /* This really doesn't seem worth it. */
11948 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11949 _("expression too complex"));
11950 constraint (inst
.operands
[1].writeback
,
11951 _("Thumb load/store multiple does not support {reglist}^"));
11953 if (unified_syntax
)
11955 bfd_boolean narrow
;
11959 /* See if we can use a 16-bit instruction. */
11960 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11961 && inst
.size_req
!= 4
11962 && !(inst
.operands
[1].imm
& ~0xff))
11964 mask
= 1 << inst
.operands
[0].reg
;
11966 if (inst
.operands
[0].reg
<= 7)
11968 if (inst
.instruction
== T_MNEM_stmia
11969 ? inst
.operands
[0].writeback
11970 : (inst
.operands
[0].writeback
11971 == !(inst
.operands
[1].imm
& mask
)))
11973 if (inst
.instruction
== T_MNEM_stmia
11974 && (inst
.operands
[1].imm
& mask
)
11975 && (inst
.operands
[1].imm
& (mask
- 1)))
11976 as_warn (_("value stored for r%d is UNKNOWN"),
11977 inst
.operands
[0].reg
);
11979 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11980 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11981 inst
.instruction
|= inst
.operands
[1].imm
;
11984 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11986 /* This means 1 register in reg list one of 3 situations:
11987 1. Instruction is stmia, but without writeback.
11988 2. lmdia without writeback, but with Rn not in
11990 3. ldmia with writeback, but with Rn in reglist.
11991 Case 3 is UNPREDICTABLE behaviour, so we handle
11992 case 1 and 2 which can be converted into a 16-bit
11993 str or ldr. The SP cases are handled below. */
11994 unsigned long opcode
;
11995 /* First, record an error for Case 3. */
11996 if (inst
.operands
[1].imm
& mask
11997 && inst
.operands
[0].writeback
)
11999 _("having the base register in the register list when "
12000 "using write back is UNPREDICTABLE");
12002 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
12004 inst
.instruction
= THUMB_OP16 (opcode
);
12005 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
12006 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
12010 else if (inst
.operands
[0] .reg
== REG_SP
)
12012 if (inst
.operands
[0].writeback
)
12015 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
12016 ? T_MNEM_push
: T_MNEM_pop
);
12017 inst
.instruction
|= inst
.operands
[1].imm
;
12020 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
12023 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
12024 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
12025 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
12033 if (inst
.instruction
< 0xffff)
12034 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12036 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
12037 inst
.operands
[1].imm
,
12038 inst
.operands
[0].writeback
);
12043 constraint (inst
.operands
[0].reg
> 7
12044 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
12045 constraint (inst
.instruction
!= T_MNEM_ldmia
12046 && inst
.instruction
!= T_MNEM_stmia
,
12047 _("Thumb-2 instruction only valid in unified syntax"));
12048 if (inst
.instruction
== T_MNEM_stmia
)
12050 if (!inst
.operands
[0].writeback
)
12051 as_warn (_("this instruction will write back the base register"));
12052 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
12053 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
12054 as_warn (_("value stored for r%d is UNKNOWN"),
12055 inst
.operands
[0].reg
);
12059 if (!inst
.operands
[0].writeback
12060 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12061 as_warn (_("this instruction will write back the base register"));
12062 else if (inst
.operands
[0].writeback
12063 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
12064 as_warn (_("this instruction will not write back the base register"));
12067 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12068 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12069 inst
.instruction
|= inst
.operands
[1].imm
;
12076 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
12077 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
12078 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
12079 || inst
.operands
[1].negative
,
12082 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
12084 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12085 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12086 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
12092 if (!inst
.operands
[1].present
)
12094 constraint (inst
.operands
[0].reg
== REG_LR
,
12095 _("r14 not allowed as first register "
12096 "when second register is omitted"));
12097 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12099 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12102 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12103 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12104 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
12110 unsigned long opcode
;
12113 if (inst
.operands
[0].isreg
12114 && !inst
.operands
[0].preind
12115 && inst
.operands
[0].reg
== REG_PC
)
12116 set_pred_insn_type_last ();
12118 opcode
= inst
.instruction
;
12119 if (unified_syntax
)
12121 if (!inst
.operands
[1].isreg
)
12123 if (opcode
<= 0xffff)
12124 inst
.instruction
= THUMB_OP32 (opcode
);
12125 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12128 if (inst
.operands
[1].isreg
12129 && !inst
.operands
[1].writeback
12130 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
12131 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
12132 && opcode
<= 0xffff
12133 && inst
.size_req
!= 4)
12135 /* Insn may have a 16-bit form. */
12136 Rn
= inst
.operands
[1].reg
;
12137 if (inst
.operands
[1].immisreg
)
12139 inst
.instruction
= THUMB_OP16 (opcode
);
12141 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
12143 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
12144 reject_bad_reg (inst
.operands
[1].imm
);
12146 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
12147 && opcode
!= T_MNEM_ldrsb
)
12148 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
12149 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
12156 if (inst
.relocs
[0].pc_rel
)
12157 opcode
= T_MNEM_ldr_pc2
;
12159 opcode
= T_MNEM_ldr_pc
;
12163 if (opcode
== T_MNEM_ldr
)
12164 opcode
= T_MNEM_ldr_sp
;
12166 opcode
= T_MNEM_str_sp
;
12168 inst
.instruction
= inst
.operands
[0].reg
<< 8;
12172 inst
.instruction
= inst
.operands
[0].reg
;
12173 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12175 inst
.instruction
|= THUMB_OP16 (opcode
);
12176 if (inst
.size_req
== 2)
12177 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12179 inst
.relax
= opcode
;
12183 /* Definitely a 32-bit variant. */
12185 /* Warning for Erratum 752419. */
12186 if (opcode
== T_MNEM_ldr
12187 && inst
.operands
[0].reg
== REG_SP
12188 && inst
.operands
[1].writeback
== 1
12189 && !inst
.operands
[1].immisreg
)
12191 if (no_cpu_selected ()
12192 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
12193 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
12194 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
12195 as_warn (_("This instruction may be unpredictable "
12196 "if executed on M-profile cores "
12197 "with interrupts enabled."));
12200 /* Do some validations regarding addressing modes. */
12201 if (inst
.operands
[1].immisreg
)
12202 reject_bad_reg (inst
.operands
[1].imm
);
12204 constraint (inst
.operands
[1].writeback
== 1
12205 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
12208 inst
.instruction
= THUMB_OP32 (opcode
);
12209 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12210 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12211 check_ldr_r15_aligned ();
12215 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
12217 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
12219 /* Only [Rn,Rm] is acceptable. */
12220 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
12221 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
12222 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
12223 || inst
.operands
[1].negative
,
12224 _("Thumb does not support this addressing mode"));
12225 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12229 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12230 if (!inst
.operands
[1].isreg
)
12231 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12234 constraint (!inst
.operands
[1].preind
12235 || inst
.operands
[1].shifted
12236 || inst
.operands
[1].writeback
,
12237 _("Thumb does not support this addressing mode"));
12238 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12240 constraint (inst
.instruction
& 0x0600,
12241 _("byte or halfword not valid for base register"));
12242 constraint (inst
.operands
[1].reg
== REG_PC
12243 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12244 _("r15 based store not allowed"));
12245 constraint (inst
.operands
[1].immisreg
,
12246 _("invalid base register for register offset"));
12248 if (inst
.operands
[1].reg
== REG_PC
)
12249 inst
.instruction
= T_OPCODE_LDR_PC
;
12250 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12251 inst
.instruction
= T_OPCODE_LDR_SP
;
12253 inst
.instruction
= T_OPCODE_STR_SP
;
12255 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12256 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12260 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12261 if (!inst
.operands
[1].immisreg
)
12263 /* Immediate offset. */
12264 inst
.instruction
|= inst
.operands
[0].reg
;
12265 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12266 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12270 /* Register offset. */
12271 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12272 constraint (inst
.operands
[1].negative
,
12273 _("Thumb does not support this addressing mode"));
12276 switch (inst
.instruction
)
12278 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12279 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12280 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12281 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12282 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12283 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12284 case 0x5600 /* ldrsb */:
12285 case 0x5e00 /* ldrsh */: break;
12289 inst
.instruction
|= inst
.operands
[0].reg
;
12290 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12291 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12297 if (!inst
.operands
[1].present
)
12299 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12300 constraint (inst
.operands
[0].reg
== REG_LR
,
12301 _("r14 not allowed here"));
12302 constraint (inst
.operands
[0].reg
== REG_R12
,
12303 _("r12 not allowed here"));
12306 if (inst
.operands
[2].writeback
12307 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12308 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12309 as_warn (_("base register written back, and overlaps "
12310 "one of transfer registers"));
12312 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12313 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12314 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12320 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12321 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12327 unsigned Rd
, Rn
, Rm
, Ra
;
12329 Rd
= inst
.operands
[0].reg
;
12330 Rn
= inst
.operands
[1].reg
;
12331 Rm
= inst
.operands
[2].reg
;
12332 Ra
= inst
.operands
[3].reg
;
12334 reject_bad_reg (Rd
);
12335 reject_bad_reg (Rn
);
12336 reject_bad_reg (Rm
);
12337 reject_bad_reg (Ra
);
12339 inst
.instruction
|= Rd
<< 8;
12340 inst
.instruction
|= Rn
<< 16;
12341 inst
.instruction
|= Rm
;
12342 inst
.instruction
|= Ra
<< 12;
12348 unsigned RdLo
, RdHi
, Rn
, Rm
;
12350 RdLo
= inst
.operands
[0].reg
;
12351 RdHi
= inst
.operands
[1].reg
;
12352 Rn
= inst
.operands
[2].reg
;
12353 Rm
= inst
.operands
[3].reg
;
12355 reject_bad_reg (RdLo
);
12356 reject_bad_reg (RdHi
);
12357 reject_bad_reg (Rn
);
12358 reject_bad_reg (Rm
);
12360 inst
.instruction
|= RdLo
<< 12;
12361 inst
.instruction
|= RdHi
<< 8;
12362 inst
.instruction
|= Rn
<< 16;
12363 inst
.instruction
|= Rm
;
12367 do_t_mov_cmp (void)
12371 Rn
= inst
.operands
[0].reg
;
12372 Rm
= inst
.operands
[1].reg
;
12375 set_pred_insn_type_last ();
12377 if (unified_syntax
)
12379 int r0off
= (inst
.instruction
== T_MNEM_mov
12380 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12381 unsigned long opcode
;
12382 bfd_boolean narrow
;
12383 bfd_boolean low_regs
;
12385 low_regs
= (Rn
<= 7 && Rm
<= 7);
12386 opcode
= inst
.instruction
;
12387 if (in_pred_block ())
12388 narrow
= opcode
!= T_MNEM_movs
;
12390 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12391 if (inst
.size_req
== 4
12392 || inst
.operands
[1].shifted
)
12395 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12396 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12397 && !inst
.operands
[1].shifted
12401 inst
.instruction
= T2_SUBS_PC_LR
;
12405 if (opcode
== T_MNEM_cmp
)
12407 constraint (Rn
== REG_PC
, BAD_PC
);
12410 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12412 warn_deprecated_sp (Rm
);
12413 /* R15 was documented as a valid choice for Rm in ARMv6,
12414 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12415 tools reject R15, so we do too. */
12416 constraint (Rm
== REG_PC
, BAD_PC
);
12419 reject_bad_reg (Rm
);
12421 else if (opcode
== T_MNEM_mov
12422 || opcode
== T_MNEM_movs
)
12424 if (inst
.operands
[1].isreg
)
12426 if (opcode
== T_MNEM_movs
)
12428 reject_bad_reg (Rn
);
12429 reject_bad_reg (Rm
);
12433 /* This is mov.n. */
12434 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12435 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12437 as_tsktsk (_("Use of r%u as a source register is "
12438 "deprecated when r%u is the destination "
12439 "register."), Rm
, Rn
);
12444 /* This is mov.w. */
12445 constraint (Rn
== REG_PC
, BAD_PC
);
12446 constraint (Rm
== REG_PC
, BAD_PC
);
12447 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12448 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12452 reject_bad_reg (Rn
);
12455 if (!inst
.operands
[1].isreg
)
12457 /* Immediate operand. */
12458 if (!in_pred_block () && opcode
== T_MNEM_mov
)
12460 if (low_regs
&& narrow
)
12462 inst
.instruction
= THUMB_OP16 (opcode
);
12463 inst
.instruction
|= Rn
<< 8;
12464 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12465 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12467 if (inst
.size_req
== 2)
12468 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12470 inst
.relax
= opcode
;
12475 constraint ((inst
.relocs
[0].type
12476 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12477 && (inst
.relocs
[0].type
12478 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12479 THUMB1_RELOC_ONLY
);
12481 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12482 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12483 inst
.instruction
|= Rn
<< r0off
;
12484 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12487 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12488 && (inst
.instruction
== T_MNEM_mov
12489 || inst
.instruction
== T_MNEM_movs
))
12491 /* Register shifts are encoded as separate shift instructions. */
12492 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12494 if (in_pred_block ())
12499 if (inst
.size_req
== 4)
12502 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12508 switch (inst
.operands
[1].shift_kind
)
12511 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12514 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12517 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12520 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12526 inst
.instruction
= opcode
;
12529 inst
.instruction
|= Rn
;
12530 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12535 inst
.instruction
|= CONDS_BIT
;
12537 inst
.instruction
|= Rn
<< 8;
12538 inst
.instruction
|= Rm
<< 16;
12539 inst
.instruction
|= inst
.operands
[1].imm
;
12544 /* Some mov with immediate shift have narrow variants.
12545 Register shifts are handled above. */
12546 if (low_regs
&& inst
.operands
[1].shifted
12547 && (inst
.instruction
== T_MNEM_mov
12548 || inst
.instruction
== T_MNEM_movs
))
12550 if (in_pred_block ())
12551 narrow
= (inst
.instruction
== T_MNEM_mov
);
12553 narrow
= (inst
.instruction
== T_MNEM_movs
);
12558 switch (inst
.operands
[1].shift_kind
)
12560 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12561 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12562 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12563 default: narrow
= FALSE
; break;
12569 inst
.instruction
|= Rn
;
12570 inst
.instruction
|= Rm
<< 3;
12571 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12575 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12576 inst
.instruction
|= Rn
<< r0off
;
12577 encode_thumb32_shifted_operand (1);
12581 switch (inst
.instruction
)
12584 /* In v4t or v5t a move of two lowregs produces unpredictable
12585 results. Don't allow this. */
12588 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12589 "MOV Rd, Rs with two low registers is not "
12590 "permitted on this architecture");
12591 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12595 inst
.instruction
= T_OPCODE_MOV_HR
;
12596 inst
.instruction
|= (Rn
& 0x8) << 4;
12597 inst
.instruction
|= (Rn
& 0x7);
12598 inst
.instruction
|= Rm
<< 3;
12602 /* We know we have low registers at this point.
12603 Generate LSLS Rd, Rs, #0. */
12604 inst
.instruction
= T_OPCODE_LSL_I
;
12605 inst
.instruction
|= Rn
;
12606 inst
.instruction
|= Rm
<< 3;
12612 inst
.instruction
= T_OPCODE_CMP_LR
;
12613 inst
.instruction
|= Rn
;
12614 inst
.instruction
|= Rm
<< 3;
12618 inst
.instruction
= T_OPCODE_CMP_HR
;
12619 inst
.instruction
|= (Rn
& 0x8) << 4;
12620 inst
.instruction
|= (Rn
& 0x7);
12621 inst
.instruction
|= Rm
<< 3;
12628 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12630 /* PR 10443: Do not silently ignore shifted operands. */
12631 constraint (inst
.operands
[1].shifted
,
12632 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12634 if (inst
.operands
[1].isreg
)
12636 if (Rn
< 8 && Rm
< 8)
12638 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12639 since a MOV instruction produces unpredictable results. */
12640 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12641 inst
.instruction
= T_OPCODE_ADD_I3
;
12643 inst
.instruction
= T_OPCODE_CMP_LR
;
12645 inst
.instruction
|= Rn
;
12646 inst
.instruction
|= Rm
<< 3;
12650 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12651 inst
.instruction
= T_OPCODE_MOV_HR
;
12653 inst
.instruction
= T_OPCODE_CMP_HR
;
12659 constraint (Rn
> 7,
12660 _("only lo regs allowed with immediate"));
12661 inst
.instruction
|= Rn
<< 8;
12662 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12673 top
= (inst
.instruction
& 0x00800000) != 0;
12674 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12676 constraint (top
, _(":lower16: not allowed in this instruction"));
12677 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12679 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12681 constraint (!top
, _(":upper16: not allowed in this instruction"));
12682 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12685 Rd
= inst
.operands
[0].reg
;
12686 reject_bad_reg (Rd
);
12688 inst
.instruction
|= Rd
<< 8;
12689 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12691 imm
= inst
.relocs
[0].exp
.X_add_number
;
12692 inst
.instruction
|= (imm
& 0xf000) << 4;
12693 inst
.instruction
|= (imm
& 0x0800) << 15;
12694 inst
.instruction
|= (imm
& 0x0700) << 4;
12695 inst
.instruction
|= (imm
& 0x00ff);
12700 do_t_mvn_tst (void)
12704 Rn
= inst
.operands
[0].reg
;
12705 Rm
= inst
.operands
[1].reg
;
12707 if (inst
.instruction
== T_MNEM_cmp
12708 || inst
.instruction
== T_MNEM_cmn
)
12709 constraint (Rn
== REG_PC
, BAD_PC
);
12711 reject_bad_reg (Rn
);
12712 reject_bad_reg (Rm
);
12714 if (unified_syntax
)
12716 int r0off
= (inst
.instruction
== T_MNEM_mvn
12717 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12718 bfd_boolean narrow
;
12720 if (inst
.size_req
== 4
12721 || inst
.instruction
> 0xffff
12722 || inst
.operands
[1].shifted
12723 || Rn
> 7 || Rm
> 7)
12725 else if (inst
.instruction
== T_MNEM_cmn
12726 || inst
.instruction
== T_MNEM_tst
)
12728 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12729 narrow
= !in_pred_block ();
12731 narrow
= in_pred_block ();
12733 if (!inst
.operands
[1].isreg
)
12735 /* For an immediate, we always generate a 32-bit opcode;
12736 section relaxation will shrink it later if possible. */
12737 if (inst
.instruction
< 0xffff)
12738 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12739 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12740 inst
.instruction
|= Rn
<< r0off
;
12741 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12745 /* See if we can do this with a 16-bit instruction. */
12748 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12749 inst
.instruction
|= Rn
;
12750 inst
.instruction
|= Rm
<< 3;
12754 constraint (inst
.operands
[1].shifted
12755 && inst
.operands
[1].immisreg
,
12756 _("shift must be constant"));
12757 if (inst
.instruction
< 0xffff)
12758 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12759 inst
.instruction
|= Rn
<< r0off
;
12760 encode_thumb32_shifted_operand (1);
12766 constraint (inst
.instruction
> 0xffff
12767 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12768 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12769 _("unshifted register required"));
12770 constraint (Rn
> 7 || Rm
> 7,
12773 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12774 inst
.instruction
|= Rn
;
12775 inst
.instruction
|= Rm
<< 3;
12784 if (do_vfp_nsyn_mrs () == SUCCESS
)
12787 Rd
= inst
.operands
[0].reg
;
12788 reject_bad_reg (Rd
);
12789 inst
.instruction
|= Rd
<< 8;
12791 if (inst
.operands
[1].isreg
)
12793 unsigned br
= inst
.operands
[1].reg
;
12794 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12795 as_bad (_("bad register for mrs"));
12797 inst
.instruction
|= br
& (0xf << 16);
12798 inst
.instruction
|= (br
& 0x300) >> 4;
12799 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12803 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12805 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12807 /* PR gas/12698: The constraint is only applied for m_profile.
12808 If the user has specified -march=all, we want to ignore it as
12809 we are building for any CPU type, including non-m variants. */
12810 bfd_boolean m_profile
=
12811 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12812 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12813 "not support requested special purpose register"));
12816 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12818 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12819 _("'APSR', 'CPSR' or 'SPSR' expected"));
12821 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12822 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12823 inst
.instruction
|= 0xf0000;
12833 if (do_vfp_nsyn_msr () == SUCCESS
)
12836 constraint (!inst
.operands
[1].isreg
,
12837 _("Thumb encoding does not support an immediate here"));
12839 if (inst
.operands
[0].isreg
)
12840 flags
= (int)(inst
.operands
[0].reg
);
12842 flags
= inst
.operands
[0].imm
;
12844 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12846 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12848 /* PR gas/12698: The constraint is only applied for m_profile.
12849 If the user has specified -march=all, we want to ignore it as
12850 we are building for any CPU type, including non-m variants. */
12851 bfd_boolean m_profile
=
12852 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12853 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12854 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12855 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12856 && bits
!= PSR_f
)) && m_profile
,
12857 _("selected processor does not support requested special "
12858 "purpose register"));
12861 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12862 "requested special purpose register"));
12864 Rn
= inst
.operands
[1].reg
;
12865 reject_bad_reg (Rn
);
12867 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12868 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12869 inst
.instruction
|= (flags
& 0x300) >> 4;
12870 inst
.instruction
|= (flags
& 0xff);
12871 inst
.instruction
|= Rn
<< 16;
12877 bfd_boolean narrow
;
12878 unsigned Rd
, Rn
, Rm
;
12880 if (!inst
.operands
[2].present
)
12881 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12883 Rd
= inst
.operands
[0].reg
;
12884 Rn
= inst
.operands
[1].reg
;
12885 Rm
= inst
.operands
[2].reg
;
12887 if (unified_syntax
)
12889 if (inst
.size_req
== 4
12895 else if (inst
.instruction
== T_MNEM_muls
)
12896 narrow
= !in_pred_block ();
12898 narrow
= in_pred_block ();
12902 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12903 constraint (Rn
> 7 || Rm
> 7,
12910 /* 16-bit MULS/Conditional MUL. */
12911 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12912 inst
.instruction
|= Rd
;
12915 inst
.instruction
|= Rm
<< 3;
12917 inst
.instruction
|= Rn
<< 3;
12919 constraint (1, _("dest must overlap one source register"));
12923 constraint (inst
.instruction
!= T_MNEM_mul
,
12924 _("Thumb-2 MUL must not set flags"));
12926 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12927 inst
.instruction
|= Rd
<< 8;
12928 inst
.instruction
|= Rn
<< 16;
12929 inst
.instruction
|= Rm
<< 0;
12931 reject_bad_reg (Rd
);
12932 reject_bad_reg (Rn
);
12933 reject_bad_reg (Rm
);
12940 unsigned RdLo
, RdHi
, Rn
, Rm
;
12942 RdLo
= inst
.operands
[0].reg
;
12943 RdHi
= inst
.operands
[1].reg
;
12944 Rn
= inst
.operands
[2].reg
;
12945 Rm
= inst
.operands
[3].reg
;
12947 reject_bad_reg (RdLo
);
12948 reject_bad_reg (RdHi
);
12949 reject_bad_reg (Rn
);
12950 reject_bad_reg (Rm
);
12952 inst
.instruction
|= RdLo
<< 12;
12953 inst
.instruction
|= RdHi
<< 8;
12954 inst
.instruction
|= Rn
<< 16;
12955 inst
.instruction
|= Rm
;
12958 as_tsktsk (_("rdhi and rdlo must be different"));
12964 set_pred_insn_type (NEUTRAL_IT_INSN
);
12966 if (unified_syntax
)
12968 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12970 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12971 inst
.instruction
|= inst
.operands
[0].imm
;
12975 /* PR9722: Check for Thumb2 availability before
12976 generating a thumb2 nop instruction. */
12977 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12979 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12980 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12983 inst
.instruction
= 0x46c0;
12988 constraint (inst
.operands
[0].present
,
12989 _("Thumb does not support NOP with hints"));
12990 inst
.instruction
= 0x46c0;
12997 if (unified_syntax
)
12999 bfd_boolean narrow
;
13001 if (THUMB_SETS_FLAGS (inst
.instruction
))
13002 narrow
= !in_pred_block ();
13004 narrow
= in_pred_block ();
13005 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13007 if (inst
.size_req
== 4)
13012 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13013 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13014 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13018 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13019 inst
.instruction
|= inst
.operands
[0].reg
;
13020 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13025 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
13027 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13029 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13030 inst
.instruction
|= inst
.operands
[0].reg
;
13031 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13040 Rd
= inst
.operands
[0].reg
;
13041 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
13043 reject_bad_reg (Rd
);
13044 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
13045 reject_bad_reg (Rn
);
13047 inst
.instruction
|= Rd
<< 8;
13048 inst
.instruction
|= Rn
<< 16;
13050 if (!inst
.operands
[2].isreg
)
13052 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13053 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13059 Rm
= inst
.operands
[2].reg
;
13060 reject_bad_reg (Rm
);
13062 constraint (inst
.operands
[2].shifted
13063 && inst
.operands
[2].immisreg
,
13064 _("shift must be constant"));
13065 encode_thumb32_shifted_operand (2);
13072 unsigned Rd
, Rn
, Rm
;
13074 Rd
= inst
.operands
[0].reg
;
13075 Rn
= inst
.operands
[1].reg
;
13076 Rm
= inst
.operands
[2].reg
;
13078 reject_bad_reg (Rd
);
13079 reject_bad_reg (Rn
);
13080 reject_bad_reg (Rm
);
13082 inst
.instruction
|= Rd
<< 8;
13083 inst
.instruction
|= Rn
<< 16;
13084 inst
.instruction
|= Rm
;
13085 if (inst
.operands
[3].present
)
13087 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
13088 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13089 _("expression too complex"));
13090 inst
.instruction
|= (val
& 0x1c) << 10;
13091 inst
.instruction
|= (val
& 0x03) << 6;
13098 if (!inst
.operands
[3].present
)
13102 inst
.instruction
&= ~0x00000020;
13104 /* PR 10168. Swap the Rm and Rn registers. */
13105 Rtmp
= inst
.operands
[1].reg
;
13106 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
13107 inst
.operands
[2].reg
= Rtmp
;
13115 if (inst
.operands
[0].immisreg
)
13116 reject_bad_reg (inst
.operands
[0].imm
);
13118 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
13122 do_t_push_pop (void)
13126 constraint (inst
.operands
[0].writeback
,
13127 _("push/pop do not support {reglist}^"));
13128 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
13129 _("expression too complex"));
13131 mask
= inst
.operands
[0].imm
;
13132 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
13133 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
13134 else if (inst
.size_req
!= 4
13135 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
13136 ? REG_LR
: REG_PC
)))
13138 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13139 inst
.instruction
|= THUMB_PP_PC_LR
;
13140 inst
.instruction
|= mask
& 0xff;
13142 else if (unified_syntax
)
13144 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13145 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
13149 inst
.error
= _("invalid register list to push/pop instruction");
13157 if (unified_syntax
)
13158 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
13161 inst
.error
= _("invalid register list to push/pop instruction");
13167 do_t_vscclrm (void)
13169 if (inst
.operands
[0].issingle
)
13171 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
13172 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
13173 inst
.instruction
|= inst
.operands
[0].imm
;
13177 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
13178 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
13179 inst
.instruction
|= 1 << 8;
13180 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
13189 Rd
= inst
.operands
[0].reg
;
13190 Rm
= inst
.operands
[1].reg
;
13192 reject_bad_reg (Rd
);
13193 reject_bad_reg (Rm
);
13195 inst
.instruction
|= Rd
<< 8;
13196 inst
.instruction
|= Rm
<< 16;
13197 inst
.instruction
|= Rm
;
13205 Rd
= inst
.operands
[0].reg
;
13206 Rm
= inst
.operands
[1].reg
;
13208 reject_bad_reg (Rd
);
13209 reject_bad_reg (Rm
);
13211 if (Rd
<= 7 && Rm
<= 7
13212 && inst
.size_req
!= 4)
13214 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13215 inst
.instruction
|= Rd
;
13216 inst
.instruction
|= Rm
<< 3;
13218 else if (unified_syntax
)
13220 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13221 inst
.instruction
|= Rd
<< 8;
13222 inst
.instruction
|= Rm
<< 16;
13223 inst
.instruction
|= Rm
;
13226 inst
.error
= BAD_HIREG
;
13234 Rd
= inst
.operands
[0].reg
;
13235 Rm
= inst
.operands
[1].reg
;
13237 reject_bad_reg (Rd
);
13238 reject_bad_reg (Rm
);
13240 inst
.instruction
|= Rd
<< 8;
13241 inst
.instruction
|= Rm
;
13249 Rd
= inst
.operands
[0].reg
;
13250 Rs
= (inst
.operands
[1].present
13251 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13252 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13254 reject_bad_reg (Rd
);
13255 reject_bad_reg (Rs
);
13256 if (inst
.operands
[2].isreg
)
13257 reject_bad_reg (inst
.operands
[2].reg
);
13259 inst
.instruction
|= Rd
<< 8;
13260 inst
.instruction
|= Rs
<< 16;
13261 if (!inst
.operands
[2].isreg
)
13263 bfd_boolean narrow
;
13265 if ((inst
.instruction
& 0x00100000) != 0)
13266 narrow
= !in_pred_block ();
13268 narrow
= in_pred_block ();
13270 if (Rd
> 7 || Rs
> 7)
13273 if (inst
.size_req
== 4 || !unified_syntax
)
13276 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13277 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13280 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13281 relaxation, but it doesn't seem worth the hassle. */
13284 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13285 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13286 inst
.instruction
|= Rs
<< 3;
13287 inst
.instruction
|= Rd
;
13291 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13292 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13296 encode_thumb32_shifted_operand (2);
13302 if (warn_on_deprecated
13303 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13304 as_tsktsk (_("setend use is deprecated for ARMv8"));
13306 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13307 if (inst
.operands
[0].imm
)
13308 inst
.instruction
|= 0x8;
13314 if (!inst
.operands
[1].present
)
13315 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13317 if (unified_syntax
)
13319 bfd_boolean narrow
;
13322 switch (inst
.instruction
)
13325 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13327 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13329 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13331 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13335 if (THUMB_SETS_FLAGS (inst
.instruction
))
13336 narrow
= !in_pred_block ();
13338 narrow
= in_pred_block ();
13339 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13341 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13343 if (inst
.operands
[2].isreg
13344 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13345 || inst
.operands
[2].reg
> 7))
13347 if (inst
.size_req
== 4)
13350 reject_bad_reg (inst
.operands
[0].reg
);
13351 reject_bad_reg (inst
.operands
[1].reg
);
13355 if (inst
.operands
[2].isreg
)
13357 reject_bad_reg (inst
.operands
[2].reg
);
13358 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13359 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13360 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13361 inst
.instruction
|= inst
.operands
[2].reg
;
13363 /* PR 12854: Error on extraneous shifts. */
13364 constraint (inst
.operands
[2].shifted
,
13365 _("extraneous shift as part of operand to shift insn"));
13369 inst
.operands
[1].shifted
= 1;
13370 inst
.operands
[1].shift_kind
= shift_kind
;
13371 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13372 ? T_MNEM_movs
: T_MNEM_mov
);
13373 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13374 encode_thumb32_shifted_operand (1);
13375 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13376 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13381 if (inst
.operands
[2].isreg
)
13383 switch (shift_kind
)
13385 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13386 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13387 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13388 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13392 inst
.instruction
|= inst
.operands
[0].reg
;
13393 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13395 /* PR 12854: Error on extraneous shifts. */
13396 constraint (inst
.operands
[2].shifted
,
13397 _("extraneous shift as part of operand to shift insn"));
13401 switch (shift_kind
)
13403 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13404 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13405 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13408 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13409 inst
.instruction
|= inst
.operands
[0].reg
;
13410 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13416 constraint (inst
.operands
[0].reg
> 7
13417 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13418 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13420 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13422 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13423 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13424 _("source1 and dest must be same register"));
13426 switch (inst
.instruction
)
13428 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13429 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13430 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13431 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13435 inst
.instruction
|= inst
.operands
[0].reg
;
13436 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13438 /* PR 12854: Error on extraneous shifts. */
13439 constraint (inst
.operands
[2].shifted
,
13440 _("extraneous shift as part of operand to shift insn"));
13444 switch (inst
.instruction
)
13446 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13447 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13448 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13449 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13452 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13453 inst
.instruction
|= inst
.operands
[0].reg
;
13454 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13462 unsigned Rd
, Rn
, Rm
;
13464 Rd
= inst
.operands
[0].reg
;
13465 Rn
= inst
.operands
[1].reg
;
13466 Rm
= inst
.operands
[2].reg
;
13468 reject_bad_reg (Rd
);
13469 reject_bad_reg (Rn
);
13470 reject_bad_reg (Rm
);
13472 inst
.instruction
|= Rd
<< 8;
13473 inst
.instruction
|= Rn
<< 16;
13474 inst
.instruction
|= Rm
;
13480 unsigned Rd
, Rn
, Rm
;
13482 Rd
= inst
.operands
[0].reg
;
13483 Rm
= inst
.operands
[1].reg
;
13484 Rn
= inst
.operands
[2].reg
;
13486 reject_bad_reg (Rd
);
13487 reject_bad_reg (Rn
);
13488 reject_bad_reg (Rm
);
13490 inst
.instruction
|= Rd
<< 8;
13491 inst
.instruction
|= Rn
<< 16;
13492 inst
.instruction
|= Rm
;
13498 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13499 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13500 _("SMC is not permitted on this architecture"));
13501 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13502 _("expression too complex"));
13503 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13504 inst
.instruction
|= (value
& 0xf000) >> 12;
13505 inst
.instruction
|= (value
& 0x0ff0);
13506 inst
.instruction
|= (value
& 0x000f) << 16;
13507 /* PR gas/15623: SMC instructions must be last in an IT block. */
13508 set_pred_insn_type_last ();
13514 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13516 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13517 inst
.instruction
|= (value
& 0x0fff);
13518 inst
.instruction
|= (value
& 0xf000) << 4;
13522 do_t_ssat_usat (int bias
)
13526 Rd
= inst
.operands
[0].reg
;
13527 Rn
= inst
.operands
[2].reg
;
13529 reject_bad_reg (Rd
);
13530 reject_bad_reg (Rn
);
13532 inst
.instruction
|= Rd
<< 8;
13533 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13534 inst
.instruction
|= Rn
<< 16;
13536 if (inst
.operands
[3].present
)
13538 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13540 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13542 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13543 _("expression too complex"));
13545 if (shift_amount
!= 0)
13547 constraint (shift_amount
> 31,
13548 _("shift expression is too large"));
13550 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13551 inst
.instruction
|= 0x00200000; /* sh bit. */
13553 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13554 inst
.instruction
|= (shift_amount
& 0x03) << 6;
/* Encode Thumb-2 SSAT: saturate positions 1..32, encoded biased by 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
13570 Rd
= inst
.operands
[0].reg
;
13571 Rn
= inst
.operands
[2].reg
;
13573 reject_bad_reg (Rd
);
13574 reject_bad_reg (Rn
);
13576 inst
.instruction
|= Rd
<< 8;
13577 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13578 inst
.instruction
|= Rn
<< 16;
13584 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13585 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13586 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13587 || inst
.operands
[2].negative
,
13590 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13592 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13593 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13594 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13595 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13601 if (!inst
.operands
[2].present
)
13602 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13604 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13605 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13606 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13609 inst
.instruction
|= inst
.operands
[0].reg
;
13610 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13611 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13612 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13618 unsigned Rd
, Rn
, Rm
;
13620 Rd
= inst
.operands
[0].reg
;
13621 Rn
= inst
.operands
[1].reg
;
13622 Rm
= inst
.operands
[2].reg
;
13624 reject_bad_reg (Rd
);
13625 reject_bad_reg (Rn
);
13626 reject_bad_reg (Rm
);
13628 inst
.instruction
|= Rd
<< 8;
13629 inst
.instruction
|= Rn
<< 16;
13630 inst
.instruction
|= Rm
;
13631 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13639 Rd
= inst
.operands
[0].reg
;
13640 Rm
= inst
.operands
[1].reg
;
13642 reject_bad_reg (Rd
);
13643 reject_bad_reg (Rm
);
13645 if (inst
.instruction
<= 0xffff
13646 && inst
.size_req
!= 4
13647 && Rd
<= 7 && Rm
<= 7
13648 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13650 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13651 inst
.instruction
|= Rd
;
13652 inst
.instruction
|= Rm
<< 3;
13654 else if (unified_syntax
)
13656 if (inst
.instruction
<= 0xffff)
13657 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13658 inst
.instruction
|= Rd
<< 8;
13659 inst
.instruction
|= Rm
;
13660 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13664 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13665 _("Thumb encoding does not support rotation"));
13666 constraint (1, BAD_HIREG
);
13673 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13682 half
= (inst
.instruction
& 0x10) != 0;
13683 set_pred_insn_type_last ();
13684 constraint (inst
.operands
[0].immisreg
,
13685 _("instruction requires register index"));
13687 Rn
= inst
.operands
[0].reg
;
13688 Rm
= inst
.operands
[0].imm
;
13690 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13691 constraint (Rn
== REG_SP
, BAD_SP
);
13692 reject_bad_reg (Rm
);
13694 constraint (!half
&& inst
.operands
[0].shifted
,
13695 _("instruction does not allow shifted index"));
13696 inst
.instruction
|= (Rn
<< 16) | Rm
;
13702 if (!inst
.operands
[0].present
)
13703 inst
.operands
[0].imm
= 0;
13705 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13707 constraint (inst
.size_req
== 2,
13708 _("immediate value out of range"));
13709 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13710 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13711 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13715 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13716 inst
.instruction
|= inst
.operands
[0].imm
;
13719 set_pred_insn_type (NEUTRAL_IT_INSN
);
/* Encode Thumb-2 USAT: saturate positions 0..31, encoded directly.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
13734 Rd
= inst
.operands
[0].reg
;
13735 Rn
= inst
.operands
[2].reg
;
13737 reject_bad_reg (Rd
);
13738 reject_bad_reg (Rn
);
13740 inst
.instruction
|= Rd
<< 8;
13741 inst
.instruction
|= inst
.operands
[1].imm
;
13742 inst
.instruction
|= Rn
<< 16;
13745 /* Checking the range of the branch offset (VAL) with NBITS bits
13746 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13748 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13750 gas_assert (nbits
> 0 && nbits
<= 32);
13753 int cmp
= (1 << (nbits
- 1));
13754 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13759 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13765 /* For branches in Armv8.1-M Mainline. */
13767 do_t_branch_future (void)
13769 unsigned long insn
= inst
.instruction
;
13771 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13772 if (inst
.operands
[0].hasreloc
== 0)
13774 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13775 as_bad (BAD_BRANCH_OFF
);
13777 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13781 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13782 inst
.relocs
[0].pc_rel
= 1;
13788 if (inst
.operands
[1].hasreloc
== 0)
13790 int val
= inst
.operands
[1].imm
;
13791 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13792 as_bad (BAD_BRANCH_OFF
);
13794 int immA
= (val
& 0x0001f000) >> 12;
13795 int immB
= (val
& 0x00000ffc) >> 2;
13796 int immC
= (val
& 0x00000002) >> 1;
13797 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13801 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13802 inst
.relocs
[1].pc_rel
= 1;
13807 if (inst
.operands
[1].hasreloc
== 0)
13809 int val
= inst
.operands
[1].imm
;
13810 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13811 as_bad (BAD_BRANCH_OFF
);
13813 int immA
= (val
& 0x0007f000) >> 12;
13814 int immB
= (val
& 0x00000ffc) >> 2;
13815 int immC
= (val
& 0x00000002) >> 1;
13816 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13820 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13821 inst
.relocs
[1].pc_rel
= 1;
13825 case T_MNEM_bfcsel
:
13827 if (inst
.operands
[1].hasreloc
== 0)
13829 int val
= inst
.operands
[1].imm
;
13830 int immA
= (val
& 0x00001000) >> 12;
13831 int immB
= (val
& 0x00000ffc) >> 2;
13832 int immC
= (val
& 0x00000002) >> 1;
13833 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13837 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13838 inst
.relocs
[1].pc_rel
= 1;
13842 if (inst
.operands
[2].hasreloc
== 0)
13844 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13845 int val2
= inst
.operands
[2].imm
;
13846 int val0
= inst
.operands
[0].imm
& 0x1f;
13847 int diff
= val2
- val0
;
13849 inst
.instruction
|= 1 << 17; /* T bit. */
13850 else if (diff
!= 2)
13851 as_bad (_("out of range label-relative fixup value"));
13855 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13856 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13857 inst
.relocs
[2].pc_rel
= 1;
13861 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13862 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13867 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13874 /* Helper function for do_t_loloop to handle relocations. */
13876 v8_1_loop_reloc (int is_le
)
13878 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13880 int value
= inst
.relocs
[0].exp
.X_add_number
;
13881 value
= (is_le
) ? -value
: value
;
13883 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13884 as_bad (BAD_BRANCH_OFF
);
13888 immh
= (value
& 0x00000ffc) >> 2;
13889 imml
= (value
& 0x00000002) >> 1;
13891 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13895 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13896 inst
.relocs
[0].pc_rel
= 1;
13900 /* To handle the Scalar Low Overhead Loop instructions
13901 in Armv8.1-M Mainline. */
13905 unsigned long insn
= inst
.instruction
;
13907 set_pred_insn_type (OUTSIDE_PRED_INSN
);
13908 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13914 if (!inst
.operands
[0].present
)
13915 inst
.instruction
|= 1 << 21;
13917 v8_1_loop_reloc (TRUE
);
13921 v8_1_loop_reloc (FALSE
);
13922 /* Fall through. */
13924 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13925 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13932 /* MVE instruction encoder helpers. */
13933 #define M_MNEM_vabav 0xee800f01
13934 #define M_MNEM_vmladav 0xeef00e00
13935 #define M_MNEM_vmladava 0xeef00e20
13936 #define M_MNEM_vmladavx 0xeef01e00
13937 #define M_MNEM_vmladavax 0xeef01e20
13938 #define M_MNEM_vmlsdav 0xeef00e01
13939 #define M_MNEM_vmlsdava 0xeef00e21
13940 #define M_MNEM_vmlsdavx 0xeef01e01
13941 #define M_MNEM_vmlsdavax 0xeef01e21
13942 #define M_MNEM_vmullt 0xee011e00
13943 #define M_MNEM_vmullb 0xee010e00
13944 #define M_MNEM_vst20 0xfc801e00
13945 #define M_MNEM_vst21 0xfc801e20
13946 #define M_MNEM_vst40 0xfc801e01
13947 #define M_MNEM_vst41 0xfc801e21
13948 #define M_MNEM_vst42 0xfc801e41
13949 #define M_MNEM_vst43 0xfc801e61
13950 #define M_MNEM_vld20 0xfc901e00
13951 #define M_MNEM_vld21 0xfc901e20
13952 #define M_MNEM_vld40 0xfc901e01
13953 #define M_MNEM_vld41 0xfc901e21
13954 #define M_MNEM_vld42 0xfc901e41
13955 #define M_MNEM_vld43 0xfc901e61
13956 #define M_MNEM_vstrb 0xec000e00
13957 #define M_MNEM_vstrh 0xec000e10
13958 #define M_MNEM_vstrw 0xec000e40
13959 #define M_MNEM_vstrd 0xec000e50
13960 #define M_MNEM_vldrb 0xec100e00
13961 #define M_MNEM_vldrh 0xec100e10
13962 #define M_MNEM_vldrw 0xec100e40
13963 #define M_MNEM_vldrd 0xec100e50
13965 /* Neon instruction encoder helpers. */
13967 /* Encodings for the different types for various Neon opcodes. */
13969 /* An "invalid" code for the following tables. */
13972 struct neon_tab_entry
13975 unsigned float_or_poly
;
13976 unsigned scalar_or_imm
;
13979 /* Map overloaded Neon opcodes to their respective encodings. */
13980 #define NEON_ENC_TAB \
13981 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13982 X(vabdl, 0x0800700, N_INV, N_INV), \
13983 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13984 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13985 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13986 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13987 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13988 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13989 X(vaddl, 0x0800000, N_INV, N_INV), \
13990 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13991 X(vsubl, 0x0800200, N_INV, N_INV), \
13992 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13993 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13994 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13995 /* Register variants of the following two instructions are encoded as
13996 vcge / vcgt with the operands reversed. */ \
13997 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13998 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13999 X(vfma, N_INV, 0x0000c10, N_INV), \
14000 X(vfms, N_INV, 0x0200c10, N_INV), \
14001 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
14002 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
14003 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
14004 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
14005 X(vmlal, 0x0800800, N_INV, 0x0800240), \
14006 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
14007 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
14008 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
14009 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
14010 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
14011 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
14012 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
14013 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
14014 X(vshl, 0x0000400, N_INV, 0x0800510), \
14015 X(vqshl, 0x0000410, N_INV, 0x0800710), \
14016 X(vand, 0x0000110, N_INV, 0x0800030), \
14017 X(vbic, 0x0100110, N_INV, 0x0800030), \
14018 X(veor, 0x1000110, N_INV, N_INV), \
14019 X(vorn, 0x0300110, N_INV, 0x0800010), \
14020 X(vorr, 0x0200110, N_INV, 0x0800010), \
14021 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
14022 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
14023 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
14024 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
14025 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
14026 X(vst1, 0x0000000, 0x0800000, N_INV), \
14027 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
14028 X(vst2, 0x0000100, 0x0800100, N_INV), \
14029 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
14030 X(vst3, 0x0000200, 0x0800200, N_INV), \
14031 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
14032 X(vst4, 0x0000300, 0x0800300, N_INV), \
14033 X(vmovn, 0x1b20200, N_INV, N_INV), \
14034 X(vtrn, 0x1b20080, N_INV, N_INV), \
14035 X(vqmovn, 0x1b20200, N_INV, N_INV), \
14036 X(vqmovun, 0x1b20240, N_INV, N_INV), \
14037 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
14038 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
14039 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
14040 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
14041 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
14042 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
14043 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
14044 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
14045 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
14046 X(vseleq, 0xe000a00, N_INV, N_INV), \
14047 X(vselvs, 0xe100a00, N_INV, N_INV), \
14048 X(vselge, 0xe200a00, N_INV, N_INV), \
14049 X(vselgt, 0xe300a00, N_INV, N_INV), \
14050 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
14051 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
14052 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
14053 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
14054 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
14055 X(aes, 0x3b00300, N_INV, N_INV), \
14056 X(sha3op, 0x2000c00, N_INV, N_INV), \
14057 X(sha1h, 0x3b902c0, N_INV, N_INV), \
14058 X(sha2op, 0x3ba0380, N_INV, N_INV)
14062 #define X(OPC,I,F,S) N_MNEM_##OPC
14067 static const struct neon_tab_entry neon_enc_tab
[] =
14069 #define X(OPC,I,F,S) { (I), (F), (S) }
14074 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
14075 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14076 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14077 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14078 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14079 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14080 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14081 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14082 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14083 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14084 #define NEON_ENC_SINGLE_(X) \
14085 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
14086 #define NEON_ENC_DOUBLE_(X) \
14087 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
14088 #define NEON_ENC_FPV8_(X) \
14089 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
14091 #define NEON_ENCODE(type, inst) \
14094 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
14095 inst.is_neon = 1; \
14099 #define check_neon_suffixes \
14102 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
14104 as_bad (_("invalid neon suffix for non neon instruction")); \
14110 /* Define shapes for instruction operands. The following mnemonic characters
14111 are used in this table:
14113 F - VFP S<n> register
14114 D - Neon D<n> register
14115 Q - Neon Q<n> register
14119 L - D<n> register list
14121 This table is used to generate various data:
14122 - enumerations of the form NS_DDR to be used as arguments to
14124 - a table classifying shapes into single, double, quad, mixed.
14125 - a table used to drive neon_select_shape. */
14127 #define NEON_SHAPE_DEF \
14128 X(3, (R, Q, Q), QUAD), \
14129 X(3, (D, D, D), DOUBLE), \
14130 X(3, (Q, Q, Q), QUAD), \
14131 X(3, (D, D, I), DOUBLE), \
14132 X(3, (Q, Q, I), QUAD), \
14133 X(3, (D, D, S), DOUBLE), \
14134 X(3, (Q, Q, S), QUAD), \
14135 X(3, (Q, Q, R), QUAD), \
14136 X(2, (D, D), DOUBLE), \
14137 X(2, (Q, Q), QUAD), \
14138 X(2, (D, S), DOUBLE), \
14139 X(2, (Q, S), QUAD), \
14140 X(2, (D, R), DOUBLE), \
14141 X(2, (Q, R), QUAD), \
14142 X(2, (D, I), DOUBLE), \
14143 X(2, (Q, I), QUAD), \
14144 X(3, (D, L, D), DOUBLE), \
14145 X(2, (D, Q), MIXED), \
14146 X(2, (Q, D), MIXED), \
14147 X(3, (D, Q, I), MIXED), \
14148 X(3, (Q, D, I), MIXED), \
14149 X(3, (Q, D, D), MIXED), \
14150 X(3, (D, Q, Q), MIXED), \
14151 X(3, (Q, Q, D), MIXED), \
14152 X(3, (Q, D, S), MIXED), \
14153 X(3, (D, Q, S), MIXED), \
14154 X(4, (D, D, D, I), DOUBLE), \
14155 X(4, (Q, Q, Q, I), QUAD), \
14156 X(4, (D, D, S, I), DOUBLE), \
14157 X(4, (Q, Q, S, I), QUAD), \
14158 X(2, (F, F), SINGLE), \
14159 X(3, (F, F, F), SINGLE), \
14160 X(2, (F, I), SINGLE), \
14161 X(2, (F, D), MIXED), \
14162 X(2, (D, F), MIXED), \
14163 X(3, (F, F, I), MIXED), \
14164 X(4, (R, R, F, F), SINGLE), \
14165 X(4, (F, F, R, R), SINGLE), \
14166 X(3, (D, R, R), DOUBLE), \
14167 X(3, (R, R, D), DOUBLE), \
14168 X(2, (S, R), SINGLE), \
14169 X(2, (R, S), SINGLE), \
14170 X(2, (F, R), SINGLE), \
14171 X(2, (R, F), SINGLE), \
14172 /* Half float shape supported so far. */\
14173 X (2, (H, D), MIXED), \
14174 X (2, (D, H), MIXED), \
14175 X (2, (H, F), MIXED), \
14176 X (2, (F, H), MIXED), \
14177 X (2, (H, H), HALF), \
14178 X (2, (H, R), HALF), \
14179 X (2, (R, H), HALF), \
14180 X (2, (H, I), HALF), \
14181 X (3, (H, H, H), HALF), \
14182 X (3, (H, F, I), MIXED), \
14183 X (3, (F, H, I), MIXED), \
14184 X (3, (D, H, H), MIXED), \
14185 X (3, (D, H, S), MIXED)
14187 #define S2(A,B) NS_##A##B
14188 #define S3(A,B,C) NS_##A##B##C
14189 #define S4(A,B,C,D) NS_##A##B##C##D
14191 #define X(N, L, C) S##N L
14204 enum neon_shape_class
14213 #define X(N, L, C) SC_##C
14215 static enum neon_shape_class neon_shape_class
[] =
14234 /* Register widths of above. */
14235 static unsigned neon_shape_el_size
[] =
14247 struct neon_shape_info
14250 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
14253 #define S2(A,B) { SE_##A, SE_##B }
14254 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
14255 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
14257 #define X(N, L, C) { N, S##N L }
14259 static struct neon_shape_info neon_shape_tab
[] =
14269 /* Bit masks used in type checking given instructions.
14270 'N_EQK' means the type must be the same as (or based on in some way) the key
14271 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14272 set, various other bits can be set as well in order to modify the meaning of
14273 the type constraint. */
14275 enum neon_type_mask
14299 N_KEY
= 0x1000000, /* Key element (main type specifier). */
14300 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
14301 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
14302 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
14303 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
14304 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
14305 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14306 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14307 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14308 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
14309 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14311 N_MAX_NONSPECIAL
= N_P64
14314 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14316 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14317 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14318 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14319 #define N_S_32 (N_S8 | N_S16 | N_S32)
14320 #define N_F_16_32 (N_F16 | N_F32)
14321 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14322 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14323 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14324 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14325 #define N_I_MVE (N_I8 | N_I16 | N_I32)
14326 #define N_F_MVE (N_F16 | N_F32)
14327 #define N_SU_MVE (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14329 /* Pass this as the first type argument to neon_check_type to ignore types
14331 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14333 /* Select a "shape" for the current instruction (describing register types or
14334 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14335 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14336 function of operand parsing, so this function doesn't need to be called.
14337 Shapes should be listed in order of decreasing length. */
14339 static enum neon_shape
14340 neon_select_shape (enum neon_shape shape
, ...)
14343 enum neon_shape first_shape
= shape
;
14345 /* Fix missing optional operands. FIXME: we don't know at this point how
14346 many arguments we should have, so this makes the assumption that we have
14347 > 1. This is true of all current Neon opcodes, I think, but may not be
14348 true in the future. */
14349 if (!inst
.operands
[1].present
)
14350 inst
.operands
[1] = inst
.operands
[0];
14352 va_start (ap
, shape
);
14354 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14359 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14361 if (!inst
.operands
[j
].present
)
14367 switch (neon_shape_tab
[shape
].el
[j
])
14369 /* If a .f16, .16, .u16, .s16 type specifier is given over
14370 a VFP single precision register operand, it's essentially
14371 means only half of the register is used.
14373 If the type specifier is given after the mnemonics, the
14374 information is stored in inst.vectype. If the type specifier
14375 is given after register operand, the information is stored
14376 in inst.operands[].vectype.
14378 When there is only one type specifier, and all the register
14379 operands are the same type of hardware register, the type
14380 specifier applies to all register operands.
14382 If no type specifier is given, the shape is inferred from
14383 operand information.
14386 vadd.f16 s0, s1, s2: NS_HHH
14387 vabs.f16 s0, s1: NS_HH
14388 vmov.f16 s0, r1: NS_HR
14389 vmov.f16 r0, s1: NS_RH
14390 vcvt.f16 r0, s1: NS_RH
14391 vcvt.f16.s32 s2, s2, #29: NS_HFI
14392 vcvt.f16.s32 s2, s2: NS_HF
14395 if (!(inst
.operands
[j
].isreg
14396 && inst
.operands
[j
].isvec
14397 && inst
.operands
[j
].issingle
14398 && !inst
.operands
[j
].isquad
14399 && ((inst
.vectype
.elems
== 1
14400 && inst
.vectype
.el
[0].size
== 16)
14401 || (inst
.vectype
.elems
> 1
14402 && inst
.vectype
.el
[j
].size
== 16)
14403 || (inst
.vectype
.elems
== 0
14404 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14405 && inst
.operands
[j
].vectype
.size
== 16))))
14410 if (!(inst
.operands
[j
].isreg
14411 && inst
.operands
[j
].isvec
14412 && inst
.operands
[j
].issingle
14413 && !inst
.operands
[j
].isquad
14414 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14415 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14416 || (inst
.vectype
.elems
== 0
14417 && (inst
.operands
[j
].vectype
.size
== 32
14418 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14423 if (!(inst
.operands
[j
].isreg
14424 && inst
.operands
[j
].isvec
14425 && !inst
.operands
[j
].isquad
14426 && !inst
.operands
[j
].issingle
))
14431 if (!(inst
.operands
[j
].isreg
14432 && !inst
.operands
[j
].isvec
))
14437 if (!(inst
.operands
[j
].isreg
14438 && inst
.operands
[j
].isvec
14439 && inst
.operands
[j
].isquad
14440 && !inst
.operands
[j
].issingle
))
14445 if (!(!inst
.operands
[j
].isreg
14446 && !inst
.operands
[j
].isscalar
))
14451 if (!(!inst
.operands
[j
].isreg
14452 && inst
.operands
[j
].isscalar
))
14462 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14463 /* We've matched all the entries in the shape table, and we don't
14464 have any left over operands which have not been matched. */
14470 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14471 first_error (_("invalid instruction shape"));
14476 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14477 means the Q bit should be set). */
14480 neon_quad (enum neon_shape shape
)
14482 return neon_shape_class
[shape
] == SC_QUAD
;
14486 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14489 /* Allow modification to be made to types which are constrained to be
14490 based on the key element, based on bits set alongside N_EQK. */
14491 if ((typebits
& N_EQK
) != 0)
14493 if ((typebits
& N_HLF
) != 0)
14495 else if ((typebits
& N_DBL
) != 0)
14497 if ((typebits
& N_SGN
) != 0)
14498 *g_type
= NT_signed
;
14499 else if ((typebits
& N_UNS
) != 0)
14500 *g_type
= NT_unsigned
;
14501 else if ((typebits
& N_INT
) != 0)
14502 *g_type
= NT_integer
;
14503 else if ((typebits
& N_FLT
) != 0)
14504 *g_type
= NT_float
;
14505 else if ((typebits
& N_SIZ
) != 0)
14506 *g_type
= NT_untyped
;
14510 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14511 operand type, i.e. the single type specified in a Neon instruction when it
14512 is the only one given. */
14514 static struct neon_type_el
14515 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14517 struct neon_type_el dest
= *key
;
14519 gas_assert ((thisarg
& N_EQK
) != 0);
14521 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14526 /* Convert Neon type and size into compact bitmask representation. */
14528 static enum neon_type_mask
14529 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14536 case 8: return N_8
;
14537 case 16: return N_16
;
14538 case 32: return N_32
;
14539 case 64: return N_64
;
14547 case 8: return N_I8
;
14548 case 16: return N_I16
;
14549 case 32: return N_I32
;
14550 case 64: return N_I64
;
14558 case 16: return N_F16
;
14559 case 32: return N_F32
;
14560 case 64: return N_F64
;
14568 case 8: return N_P8
;
14569 case 16: return N_P16
;
14570 case 64: return N_P64
;
14578 case 8: return N_S8
;
14579 case 16: return N_S16
;
14580 case 32: return N_S32
;
14581 case 64: return N_S64
;
14589 case 8: return N_U8
;
14590 case 16: return N_U16
;
14591 case 32: return N_U32
;
14592 case 64: return N_U64
;
14603 /* Convert compact Neon bitmask type representation to a type and size. Only
14604 handles the case where a single bit is set in the mask. */
14607 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14608 enum neon_type_mask mask
)
14610 if ((mask
& N_EQK
) != 0)
14613 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14615 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14617 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14619 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14624 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14626 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14627 *type
= NT_unsigned
;
14628 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14629 *type
= NT_integer
;
14630 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14631 *type
= NT_untyped
;
14632 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14634 else if ((mask
& (N_F_ALL
)) != 0)
14642 /* Modify a bitmask of allowed types. This is only needed for type
14646 modify_types_allowed (unsigned allowed
, unsigned mods
)
14649 enum neon_el_type type
;
14655 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14657 if (el_type_of_type_chk (&type
, &size
,
14658 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14660 neon_modify_type_size (mods
, &type
, &size
);
14661 destmask
|= type_chk_of_el_type (type
, size
);
14668 /* Check type and return type classification.
14669 The manual states (paraphrase): If one datatype is given, it indicates the
14671 - the second operand, if there is one
14672 - the operand, if there is no second operand
14673 - the result, if there are no operands.
14674 This isn't quite good enough though, so we use a concept of a "key" datatype
14675 which is set on a per-instruction basis, which is the one which matters when
14676 only one data type is written.
14677 Note: this function has side-effects (e.g. filling in missing operands). All
14678 Neon instructions should call it before performing bit encoding. */
14680 static struct neon_type_el
14681 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14684 unsigned i
, pass
, key_el
= 0;
14685 unsigned types
[NEON_MAX_TYPE_ELS
];
14686 enum neon_el_type k_type
= NT_invtype
;
14687 unsigned k_size
= -1u;
14688 struct neon_type_el badtype
= {NT_invtype
, -1};
14689 unsigned key_allowed
= 0;
14691 /* Optional registers in Neon instructions are always (not) in operand 1.
14692 Fill in the missing operand here, if it was omitted. */
14693 if (els
> 1 && !inst
.operands
[1].present
)
14694 inst
.operands
[1] = inst
.operands
[0];
14696 /* Suck up all the varargs. */
14698 for (i
= 0; i
< els
; i
++)
14700 unsigned thisarg
= va_arg (ap
, unsigned);
14701 if (thisarg
== N_IGNORE_TYPE
)
14706 types
[i
] = thisarg
;
14707 if ((thisarg
& N_KEY
) != 0)
14712 if (inst
.vectype
.elems
> 0)
14713 for (i
= 0; i
< els
; i
++)
14714 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14716 first_error (_("types specified in both the mnemonic and operands"));
14720 /* Duplicate inst.vectype elements here as necessary.
14721 FIXME: No idea if this is exactly the same as the ARM assembler,
14722 particularly when an insn takes one register and one non-register
14724 if (inst
.vectype
.elems
== 1 && els
> 1)
14727 inst
.vectype
.elems
= els
;
14728 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14729 for (j
= 0; j
< els
; j
++)
14731 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14734 else if (inst
.vectype
.elems
== 0 && els
> 0)
14737 /* No types were given after the mnemonic, so look for types specified
14738 after each operand. We allow some flexibility here; as long as the
14739 "key" operand has a type, we can infer the others. */
14740 for (j
= 0; j
< els
; j
++)
14741 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14742 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14744 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14746 for (j
= 0; j
< els
; j
++)
14747 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14748 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14753 first_error (_("operand types can't be inferred"));
14757 else if (inst
.vectype
.elems
!= els
)
14759 first_error (_("type specifier has the wrong number of parts"));
14763 for (pass
= 0; pass
< 2; pass
++)
14765 for (i
= 0; i
< els
; i
++)
14767 unsigned thisarg
= types
[i
];
14768 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14769 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14770 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14771 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14773 /* Decay more-specific signed & unsigned types to sign-insensitive
14774 integer types if sign-specific variants are unavailable. */
14775 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14776 && (types_allowed
& N_SU_ALL
) == 0)
14777 g_type
= NT_integer
;
14779 /* If only untyped args are allowed, decay any more specific types to
14780 them. Some instructions only care about signs for some element
14781 sizes, so handle that properly. */
14782 if (((types_allowed
& N_UNT
) == 0)
14783 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14784 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14785 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14786 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14787 g_type
= NT_untyped
;
14791 if ((thisarg
& N_KEY
) != 0)
14795 key_allowed
= thisarg
& ~N_KEY
;
14797 /* Check architecture constraint on FP16 extension. */
14799 && k_type
== NT_float
14800 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14802 inst
.error
= _(BAD_FP16
);
14809 if ((thisarg
& N_VFP
) != 0)
14811 enum neon_shape_el regshape
;
14812 unsigned regwidth
, match
;
14814 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14817 first_error (_("invalid instruction shape"));
14820 regshape
= neon_shape_tab
[ns
].el
[i
];
14821 regwidth
= neon_shape_el_size
[regshape
];
14823 /* In VFP mode, operands must match register widths. If we
14824 have a key operand, use its width, else use the width of
14825 the current operand. */
14831 /* FP16 will use a single precision register. */
14832 if (regwidth
== 32 && match
== 16)
14834 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14838 inst
.error
= _(BAD_FP16
);
14843 if (regwidth
!= match
)
14845 first_error (_("operand size must match register width"));
14850 if ((thisarg
& N_EQK
) == 0)
14852 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14854 if ((given_type
& types_allowed
) == 0)
14856 first_error (BAD_SIMD_TYPE
);
14862 enum neon_el_type mod_k_type
= k_type
;
14863 unsigned mod_k_size
= k_size
;
14864 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14865 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14867 first_error (_("inconsistent types in Neon instruction"));
14875 return inst
.vectype
.el
[key_el
];
14878 /* Neon-style VFP instruction forwarding. */
14880 /* Thumb VFP instructions have 0xE in the condition field. */
14883 do_vfp_cond_or_thumb (void)
14888 inst
.instruction
|= 0xe0000000;
14890 inst
.instruction
|= inst
.cond
<< 28;
14893 /* Look up and encode a simple mnemonic, for use as a helper function for the
14894 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14895 etc. It is assumed that operand parsing has already been done, and that the
14896 operands are in the form expected by the given opcode (this isn't necessarily
14897 the same as the form in which they were parsed, hence some massaging must
14898 take place before this function is called).
14899 Checks current arch version against that in the looked-up opcode. */
14902 do_vfp_nsyn_opcode (const char *opname
)
14904 const struct asm_opcode
*opcode
;
14906 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14911 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14912 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14919 inst
.instruction
= opcode
->tvalue
;
14920 opcode
->tencode ();
14924 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14925 opcode
->aencode ();
14930 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14932 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14934 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14937 do_vfp_nsyn_opcode ("fadds");
14939 do_vfp_nsyn_opcode ("fsubs");
14941 /* ARMv8.2 fp16 instruction. */
14943 do_scalar_fp16_v82_encode ();
14948 do_vfp_nsyn_opcode ("faddd");
14950 do_vfp_nsyn_opcode ("fsubd");
14954 /* Check operand types to see if this is a VFP instruction, and if so call
14958 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14960 enum neon_shape rs
;
14961 struct neon_type_el et
;
14966 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14967 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14971 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14972 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14973 N_F_ALL
| N_KEY
| N_VFP
);
14980 if (et
.type
!= NT_invtype
)
14991 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14993 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14995 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14998 do_vfp_nsyn_opcode ("fmacs");
15000 do_vfp_nsyn_opcode ("fnmacs");
15002 /* ARMv8.2 fp16 instruction. */
15004 do_scalar_fp16_v82_encode ();
15009 do_vfp_nsyn_opcode ("fmacd");
15011 do_vfp_nsyn_opcode ("fnmacd");
15016 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
15018 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
15020 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15023 do_vfp_nsyn_opcode ("ffmas");
15025 do_vfp_nsyn_opcode ("ffnmas");
15027 /* ARMv8.2 fp16 instruction. */
15029 do_scalar_fp16_v82_encode ();
15034 do_vfp_nsyn_opcode ("ffmad");
15036 do_vfp_nsyn_opcode ("ffnmad");
15041 do_vfp_nsyn_mul (enum neon_shape rs
)
15043 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15045 do_vfp_nsyn_opcode ("fmuls");
15047 /* ARMv8.2 fp16 instruction. */
15049 do_scalar_fp16_v82_encode ();
15052 do_vfp_nsyn_opcode ("fmuld");
15056 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
15058 int is_neg
= (inst
.instruction
& 0x80) != 0;
15059 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
15061 if (rs
== NS_FF
|| rs
== NS_HH
)
15064 do_vfp_nsyn_opcode ("fnegs");
15066 do_vfp_nsyn_opcode ("fabss");
15068 /* ARMv8.2 fp16 instruction. */
15070 do_scalar_fp16_v82_encode ();
15075 do_vfp_nsyn_opcode ("fnegd");
15077 do_vfp_nsyn_opcode ("fabsd");
15081 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15082 insns belong to Neon, and are handled elsewhere. */
15085 do_vfp_nsyn_ldm_stm (int is_dbmode
)
15087 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
15091 do_vfp_nsyn_opcode ("fldmdbs");
15093 do_vfp_nsyn_opcode ("fldmias");
15098 do_vfp_nsyn_opcode ("fstmdbs");
15100 do_vfp_nsyn_opcode ("fstmias");
15105 do_vfp_nsyn_sqrt (void)
15107 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15108 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15110 if (rs
== NS_FF
|| rs
== NS_HH
)
15112 do_vfp_nsyn_opcode ("fsqrts");
15114 /* ARMv8.2 fp16 instruction. */
15116 do_scalar_fp16_v82_encode ();
15119 do_vfp_nsyn_opcode ("fsqrtd");
15123 do_vfp_nsyn_div (void)
15125 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15126 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15127 N_F_ALL
| N_KEY
| N_VFP
);
15129 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15131 do_vfp_nsyn_opcode ("fdivs");
15133 /* ARMv8.2 fp16 instruction. */
15135 do_scalar_fp16_v82_encode ();
15138 do_vfp_nsyn_opcode ("fdivd");
15142 do_vfp_nsyn_nmul (void)
15144 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
15145 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
15146 N_F_ALL
| N_KEY
| N_VFP
);
15148 if (rs
== NS_FFF
|| rs
== NS_HHH
)
15150 NEON_ENCODE (SINGLE
, inst
);
15151 do_vfp_sp_dyadic ();
15153 /* ARMv8.2 fp16 instruction. */
15155 do_scalar_fp16_v82_encode ();
15159 NEON_ENCODE (DOUBLE
, inst
);
15160 do_vfp_dp_rd_rn_rm ();
15162 do_vfp_cond_or_thumb ();
15167 do_vfp_nsyn_cmp (void)
15169 enum neon_shape rs
;
15170 if (inst
.operands
[1].isreg
)
15172 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
15173 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
15175 if (rs
== NS_FF
|| rs
== NS_HH
)
15177 NEON_ENCODE (SINGLE
, inst
);
15178 do_vfp_sp_monadic ();
15182 NEON_ENCODE (DOUBLE
, inst
);
15183 do_vfp_dp_rd_rm ();
15188 rs
= neon_select_shape (NS_HI
, NS_FI
, NS_DI
, NS_NULL
);
15189 neon_check_type (2, rs
, N_F_ALL
| N_KEY
| N_VFP
, N_EQK
);
15191 switch (inst
.instruction
& 0x0fffffff)
15194 inst
.instruction
+= N_MNEM_vcmpz
- N_MNEM_vcmp
;
15197 inst
.instruction
+= N_MNEM_vcmpez
- N_MNEM_vcmpe
;
15203 if (rs
== NS_FI
|| rs
== NS_HI
)
15205 NEON_ENCODE (SINGLE
, inst
);
15206 do_vfp_sp_compare_z ();
15210 NEON_ENCODE (DOUBLE
, inst
);
15214 do_vfp_cond_or_thumb ();
15216 /* ARMv8.2 fp16 instruction. */
15217 if (rs
== NS_HI
|| rs
== NS_HH
)
15218 do_scalar_fp16_v82_encode ();
15222 nsyn_insert_sp (void)
15224 inst
.operands
[1] = inst
.operands
[0];
15225 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
15226 inst
.operands
[0].reg
= REG_SP
;
15227 inst
.operands
[0].isreg
= 1;
15228 inst
.operands
[0].writeback
= 1;
15229 inst
.operands
[0].present
= 1;
15233 do_vfp_nsyn_push (void)
15237 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15238 _("register list must contain at least 1 and at most 16 "
15241 if (inst
.operands
[1].issingle
)
15242 do_vfp_nsyn_opcode ("fstmdbs");
15244 do_vfp_nsyn_opcode ("fstmdbd");
15248 do_vfp_nsyn_pop (void)
15252 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
15253 _("register list must contain at least 1 and at most 16 "
15256 if (inst
.operands
[1].issingle
)
15257 do_vfp_nsyn_opcode ("fldmias");
15259 do_vfp_nsyn_opcode ("fldmiad");
15262 /* Fix up Neon data-processing instructions, ORing in the correct bits for
15263 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
15266 neon_dp_fixup (struct arm_it
* insn
)
15268 unsigned int i
= insn
->instruction
;
15273 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
15284 insn
->instruction
= i
;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3), used to encode element-size fields.  X must be a power
   of two >= 8 for the result to be meaningful.  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit; subtracting 4
     maps 8->0, 16->1, 32->2, 64->3.  */
  return ffs (x) - 4;
}
/* Low four bits of a register number: the Vd/Vn/Vm field of an encoding.  */
#define LOW4(R) ((R) & 0xf)
/* Bit 4 of a register number: the D/N/M high bit of an encoding.  */
#define HI1(R) (((R) >> 4) & 1)
15300 mve_encode_qqr (int size
, int fp
)
15302 if (inst
.operands
[2].reg
== REG_SP
)
15303 as_tsktsk (MVE_BAD_SP
);
15304 else if (inst
.operands
[2].reg
== REG_PC
)
15305 as_tsktsk (MVE_BAD_PC
);
15310 if (((unsigned)inst
.instruction
) == 0xd00)
15311 inst
.instruction
= 0xee300f40;
15313 else if (((unsigned)inst
.instruction
) == 0x200d00)
15314 inst
.instruction
= 0xee301f40;
15316 /* Setting size which is 1 for F16 and 0 for F32. */
15317 inst
.instruction
|= (size
== 16) << 28;
15322 if (((unsigned)inst
.instruction
) == 0x800)
15323 inst
.instruction
= 0xee010f40;
15325 else if (((unsigned)inst
.instruction
) == 0x1000800)
15326 inst
.instruction
= 0xee011f40;
15327 /* Setting bits for size. */
15328 inst
.instruction
|= neon_logbits (size
) << 20;
15330 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15331 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15332 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15333 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15334 inst
.instruction
|= inst
.operands
[2].reg
;
15339 mve_encode_rqq (unsigned bit28
, unsigned size
)
15341 inst
.instruction
|= bit28
<< 28;
15342 inst
.instruction
|= neon_logbits (size
) << 20;
15343 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15344 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
15345 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15346 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15347 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15352 mve_encode_qqq (int ubit
, int size
)
15355 inst
.instruction
|= (ubit
!= 0) << 28;
15356 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15357 inst
.instruction
|= neon_logbits (size
) << 20;
15358 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15359 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15360 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15361 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15362 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15368 /* Encode insns with bit pattern:
15370 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15371 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
15373 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
15374 different meaning for some instruction. */
15377 neon_three_same (int isquad
, int ubit
, int size
)
15379 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15380 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15381 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15382 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15383 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
15384 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
15385 inst
.instruction
|= (isquad
!= 0) << 6;
15386 inst
.instruction
|= (ubit
!= 0) << 24;
15388 inst
.instruction
|= neon_logbits (size
) << 20;
15390 neon_dp_fixup (&inst
);
15393 /* Encode instructions of the form:
15395 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
15396 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
15398 Don't write size if SIZE == -1. */
15401 neon_two_same (int qbit
, int ubit
, int size
)
15403 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15404 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15405 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15406 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15407 inst
.instruction
|= (qbit
!= 0) << 6;
15408 inst
.instruction
|= (ubit
!= 0) << 24;
15411 inst
.instruction
|= neon_logbits (size
) << 18;
15413 neon_dp_fixup (&inst
);
15416 /* Neon instruction encoders, in approximate order of appearance. */
15419 do_neon_dyadic_i_su (void)
15421 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15422 struct neon_type_el et
= neon_check_type (3, rs
,
15423 N_EQK
, N_EQK
, N_SU_32
| N_KEY
);
15424 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15428 do_neon_dyadic_i64_su (void)
15430 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15431 struct neon_type_el et
= neon_check_type (3, rs
,
15432 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15433 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15437 neon_imm_shift (int write_ubit
, int uval
, int isquad
, struct neon_type_el et
,
15440 unsigned size
= et
.size
>> 3;
15441 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15442 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15443 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
15444 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
15445 inst
.instruction
|= (isquad
!= 0) << 6;
15446 inst
.instruction
|= immbits
<< 16;
15447 inst
.instruction
|= (size
>> 3) << 7;
15448 inst
.instruction
|= (size
& 0x7) << 19;
15450 inst
.instruction
|= (uval
!= 0) << 24;
15452 neon_dp_fixup (&inst
);
15456 do_neon_shl_imm (void)
15458 if (!inst
.operands
[2].isreg
)
15460 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15461 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_KEY
| N_I_ALL
);
15462 int imm
= inst
.operands
[2].imm
;
15464 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15465 _("immediate out of range for shift"));
15466 NEON_ENCODE (IMMED
, inst
);
15467 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
15471 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15472 struct neon_type_el et
= neon_check_type (3, rs
,
15473 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15476 /* VSHL/VQSHL 3-register variants have syntax such as:
15478 whereas other 3-register operations encoded by neon_three_same have
15481 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
15483 tmp
= inst
.operands
[2].reg
;
15484 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15485 inst
.operands
[1].reg
= tmp
;
15486 NEON_ENCODE (INTEGER
, inst
);
15487 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15492 do_neon_qshl_imm (void)
15494 if (!inst
.operands
[2].isreg
)
15496 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
15497 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
15498 int imm
= inst
.operands
[2].imm
;
15500 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
15501 _("immediate out of range for shift"));
15502 NEON_ENCODE (IMMED
, inst
);
15503 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
, imm
);
15507 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15508 struct neon_type_el et
= neon_check_type (3, rs
,
15509 N_EQK
, N_SU_ALL
| N_KEY
, N_EQK
| N_SGN
);
15512 /* See note in do_neon_shl_imm. */
15513 tmp
= inst
.operands
[2].reg
;
15514 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15515 inst
.operands
[1].reg
= tmp
;
15516 NEON_ENCODE (INTEGER
, inst
);
15517 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15522 do_neon_rshl (void)
15524 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15525 struct neon_type_el et
= neon_check_type (3, rs
,
15526 N_EQK
, N_EQK
, N_SU_ALL
| N_KEY
);
15529 tmp
= inst
.operands
[2].reg
;
15530 inst
.operands
[2].reg
= inst
.operands
[1].reg
;
15531 inst
.operands
[1].reg
= tmp
;
15532 neon_three_same (neon_quad (rs
), et
.type
== NT_unsigned
, et
.size
);
15536 neon_cmode_for_logic_imm (unsigned immediate
, unsigned *immbits
, int size
)
15538 /* Handle .I8 pseudo-instructions. */
15541 /* Unfortunately, this will make everything apart from zero out-of-range.
15542 FIXME is this the intended semantics? There doesn't seem much point in
15543 accepting .I8 if so. */
15544 immediate
|= immediate
<< 8;
15550 if (immediate
== (immediate
& 0x000000ff))
15552 *immbits
= immediate
;
15555 else if (immediate
== (immediate
& 0x0000ff00))
15557 *immbits
= immediate
>> 8;
15560 else if (immediate
== (immediate
& 0x00ff0000))
15562 *immbits
= immediate
>> 16;
15565 else if (immediate
== (immediate
& 0xff000000))
15567 *immbits
= immediate
>> 24;
15570 if ((immediate
& 0xffff) != (immediate
>> 16))
15571 goto bad_immediate
;
15572 immediate
&= 0xffff;
15575 if (immediate
== (immediate
& 0x000000ff))
15577 *immbits
= immediate
;
15580 else if (immediate
== (immediate
& 0x0000ff00))
15582 *immbits
= immediate
>> 8;
15587 first_error (_("immediate value out of range"));
15592 do_neon_logic (void)
15594 if (inst
.operands
[2].present
&& inst
.operands
[2].isreg
)
15596 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15597 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15598 /* U bit and size field were set as part of the bitmask. */
15599 NEON_ENCODE (INTEGER
, inst
);
15600 neon_three_same (neon_quad (rs
), 0, -1);
15604 const int three_ops_form
= (inst
.operands
[2].present
15605 && !inst
.operands
[2].isreg
);
15606 const int immoperand
= (three_ops_form
? 2 : 1);
15607 enum neon_shape rs
= (three_ops_form
15608 ? neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
)
15609 : neon_select_shape (NS_DI
, NS_QI
, NS_NULL
));
15610 struct neon_type_el et
= neon_check_type (2, rs
,
15611 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
15612 enum neon_opc opcode
= (enum neon_opc
) inst
.instruction
& 0x0fffffff;
15616 if (et
.type
== NT_invtype
)
15619 if (three_ops_form
)
15620 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
15621 _("first and second operands shall be the same register"));
15623 NEON_ENCODE (IMMED
, inst
);
15625 immbits
= inst
.operands
[immoperand
].imm
;
15628 /* .i64 is a pseudo-op, so the immediate must be a repeating
15630 if (immbits
!= (inst
.operands
[immoperand
].regisimm
?
15631 inst
.operands
[immoperand
].reg
: 0))
15633 /* Set immbits to an invalid constant. */
15634 immbits
= 0xdeadbeef;
15641 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15645 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15649 /* Pseudo-instruction for VBIC. */
15650 neon_invert_size (&immbits
, 0, et
.size
);
15651 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15655 /* Pseudo-instruction for VORR. */
15656 neon_invert_size (&immbits
, 0, et
.size
);
15657 cmode
= neon_cmode_for_logic_imm (immbits
, &immbits
, et
.size
);
15667 inst
.instruction
|= neon_quad (rs
) << 6;
15668 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15669 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15670 inst
.instruction
|= cmode
<< 8;
15671 neon_write_immbits (immbits
);
15673 neon_dp_fixup (&inst
);
15678 do_neon_bitfield (void)
15680 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
15681 neon_check_type (3, rs
, N_IGNORE_TYPE
);
15682 neon_three_same (neon_quad (rs
), 0, -1);
15686 neon_dyadic_misc (enum neon_el_type ubit_meaning
, unsigned types
,
15689 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
15690 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
| destbits
, N_EQK
,
15692 if (et
.type
== NT_float
)
15694 NEON_ENCODE (FLOAT
, inst
);
15696 mve_encode_qqr (et
.size
, 1);
15698 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
15702 NEON_ENCODE (INTEGER
, inst
);
15704 mve_encode_qqr (et
.size
, 0);
15706 neon_three_same (neon_quad (rs
), et
.type
== ubit_meaning
, et
.size
);
15712 do_neon_dyadic_if_su_d (void)
15714 /* This version only allow D registers, but that constraint is enforced during
15715 operand parsing so we don't need to do anything extra here. */
15716 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
15720 do_neon_dyadic_if_i_d (void)
15722 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15723 affected if we specify unsigned args. */
15724 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
/* Flags for vfp_or_neon_is_neon: which checks to perform.
   NOTE(review): NEON_CHECK_CC's value (1) was dropped by extraction and has
   been restored from its use as a bit flag — confirm against upstream.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
15734 /* Call this function if an instruction which may have belonged to the VFP or
15735 Neon instruction sets, but turned out to be a Neon instruction (due to the
15736 operand types involved, etc.). We have to check and/or fix-up a couple of
15739 - Make sure the user hasn't attempted to make a Neon instruction
15741 - Alter the value in the condition code field if necessary.
15742 - Make sure that the arch supports Neon instructions.
15744 Which of these operations take place depends on bits from enum
15745 vfp_or_neon_is_neon_bits.
15747 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
15748 current instruction's condition is COND_ALWAYS, the condition field is
15749 changed to inst.uncond_value. This is necessary because instructions shared
15750 between VFP and Neon may be conditional for the VFP variants only, and the
15751 unconditional Neon version must have, e.g., 0xF in the condition field. */
15754 vfp_or_neon_is_neon (unsigned check
)
15756 /* Conditions are always legal in Thumb mode (IT blocks). */
15757 if (!thumb_mode
&& (check
& NEON_CHECK_CC
))
15759 if (inst
.cond
!= COND_ALWAYS
)
15761 first_error (_(BAD_COND
));
15764 if (inst
.uncond_value
!= -1)
15765 inst
.instruction
|= inst
.uncond_value
<< 28;
15769 if (((check
& NEON_CHECK_ARCH
) && !mark_feature_used (&fpu_neon_ext_v1
))
15770 || ((check
& NEON_CHECK_ARCH8
)
15771 && !mark_feature_used (&fpu_neon_ext_armv8
)))
15773 first_error (_(BAD_FPU
));
15781 check_simd_pred_availability (int fp
, unsigned check
)
15783 if (inst
.cond
> COND_ALWAYS
)
15785 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15787 inst
.error
= BAD_FPU
;
15790 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15792 else if (inst
.cond
< COND_ALWAYS
)
15794 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15795 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15796 else if (vfp_or_neon_is_neon (check
) == FAIL
)
15801 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fp
? mve_fp_ext
: mve_ext
)
15802 && vfp_or_neon_is_neon (check
) == FAIL
)
15805 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
15806 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15812 do_mve_vstr_vldr_QI (int size
, int elsize
, int load
)
15814 constraint (size
< 32, BAD_ADDR_MODE
);
15815 constraint (size
!= elsize
, BAD_EL_TYPE
);
15816 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
15817 constraint (!inst
.operands
[1].preind
, BAD_ADDR_MODE
);
15818 constraint (load
&& inst
.operands
[0].reg
== inst
.operands
[1].reg
,
15819 _("destination register and offset register may not be the"
15822 int imm
= inst
.relocs
[0].exp
.X_add_number
;
15829 constraint ((imm
% (size
/ 8) != 0)
15830 || imm
> (0x7f << neon_logbits (size
)),
15831 (size
== 32) ? _("immediate must be a multiple of 4 in the"
15832 " range of +/-[0,508]")
15833 : _("immediate must be a multiple of 8 in the"
15834 " range of +/-[0,1016]"));
15835 inst
.instruction
|= 0x11 << 24;
15836 inst
.instruction
|= add
<< 23;
15837 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15838 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
15839 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
15840 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15841 inst
.instruction
|= 1 << 12;
15842 inst
.instruction
|= (size
== 64) << 8;
15843 inst
.instruction
&= 0xffffff00;
15844 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
15845 inst
.instruction
|= imm
>> neon_logbits (size
);
15849 do_mve_vstr_vldr_RQ (int size
, int elsize
, int load
)
15851 unsigned os
= inst
.operands
[1].imm
>> 5;
15852 constraint (os
!= 0 && size
== 8,
15853 _("can not shift offsets when accessing less than half-word"));
15854 constraint (os
&& os
!= neon_logbits (size
),
15855 _("shift immediate must be 1, 2 or 3 for half-word, word"
15856 " or double-word accesses respectively"));
15857 if (inst
.operands
[1].reg
== REG_PC
)
15858 as_tsktsk (MVE_BAD_PC
);
15863 constraint (elsize
>= 64, BAD_EL_TYPE
);
15866 constraint (elsize
< 16 || elsize
>= 64, BAD_EL_TYPE
);
15870 constraint (elsize
!= size
, BAD_EL_TYPE
);
15875 constraint (inst
.operands
[1].writeback
|| !inst
.operands
[1].preind
,
15879 constraint (inst
.operands
[0].reg
== (inst
.operands
[1].imm
& 0x1f),
15880 _("destination register and offset register may not be"
15882 constraint (size
== elsize
&& inst
.vectype
.el
[0].type
!= NT_unsigned
,
15884 constraint (inst
.vectype
.el
[0].type
!= NT_unsigned
15885 && inst
.vectype
.el
[0].type
!= NT_signed
, BAD_EL_TYPE
);
15886 inst
.instruction
|= (inst
.vectype
.el
[0].type
== NT_unsigned
) << 28;
15890 constraint (inst
.vectype
.el
[0].type
!= NT_untyped
, BAD_EL_TYPE
);
15893 inst
.instruction
|= 1 << 23;
15894 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15895 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15896 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15897 inst
.instruction
|= neon_logbits (elsize
) << 7;
15898 inst
.instruction
|= HI1 (inst
.operands
[1].imm
) << 5;
15899 inst
.instruction
|= LOW4 (inst
.operands
[1].imm
);
15900 inst
.instruction
|= !!os
;
15904 do_mve_vstr_vldr_RI (int size
, int elsize
, int load
)
15906 enum neon_el_type type
= inst
.vectype
.el
[0].type
;
15908 constraint (size
>= 64, BAD_ADDR_MODE
);
15912 constraint (elsize
< 16 || elsize
>= 64, BAD_EL_TYPE
);
15915 constraint (elsize
!= size
, BAD_EL_TYPE
);
15922 constraint (elsize
!= size
&& type
!= NT_unsigned
15923 && type
!= NT_signed
, BAD_EL_TYPE
);
15927 constraint (elsize
!= size
&& type
!= NT_untyped
, BAD_EL_TYPE
);
15930 int imm
= inst
.relocs
[0].exp
.X_add_number
;
15938 if ((imm
% (size
/ 8) != 0) || imm
> (0x7f << neon_logbits (size
)))
15943 constraint (1, _("immediate must be in the range of +/-[0,127]"));
15946 constraint (1, _("immediate must be a multiple of 2 in the"
15947 " range of +/-[0,254]"));
15950 constraint (1, _("immediate must be a multiple of 4 in the"
15951 " range of +/-[0,508]"));
15956 if (size
!= elsize
)
15958 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
15959 constraint (inst
.operands
[0].reg
> 14,
15960 _("MVE vector register in the range [Q0..Q7] expected"));
15961 inst
.instruction
|= (load
&& type
== NT_unsigned
) << 28;
15962 inst
.instruction
|= (size
== 16) << 19;
15963 inst
.instruction
|= neon_logbits (elsize
) << 7;
15967 if (inst
.operands
[1].reg
== REG_PC
)
15968 as_tsktsk (MVE_BAD_PC
);
15969 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
15970 as_tsktsk (MVE_BAD_SP
);
15971 inst
.instruction
|= 1 << 12;
15972 inst
.instruction
|= neon_logbits (size
) << 7;
15974 inst
.instruction
|= inst
.operands
[1].preind
<< 24;
15975 inst
.instruction
|= add
<< 23;
15976 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
15977 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
15978 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
15979 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
15980 inst
.instruction
&= 0xffffff80;
15981 inst
.instruction
|= imm
>> neon_logbits (size
);
15986 do_mve_vstr_vldr (void)
15991 if (inst
.cond
> COND_ALWAYS
)
15992 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
15994 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
15996 switch (inst
.instruction
)
16003 /* fall through. */
16009 /* fall through. */
16015 /* fall through. */
16021 /* fall through. */
16026 unsigned elsize
= inst
.vectype
.el
[0].size
;
16028 if (inst
.operands
[1].isquad
)
16030 /* We are dealing with [Q, imm]{!} cases. */
16031 do_mve_vstr_vldr_QI (size
, elsize
, load
);
16035 if (inst
.operands
[1].immisreg
== 2)
16037 /* We are dealing with [R, Q, {UXTW #os}] cases. */
16038 do_mve_vstr_vldr_RQ (size
, elsize
, load
);
16040 else if (!inst
.operands
[1].immisreg
)
16042 /* We are dealing with [R, Imm]{!}/[R], Imm cases. */
16043 do_mve_vstr_vldr_RI (size
, elsize
, load
);
16046 constraint (1, BAD_ADDR_MODE
);
16053 do_mve_vst_vld (void)
16055 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16058 constraint (!inst
.operands
[1].preind
|| inst
.relocs
[0].exp
.X_add_symbol
!= 0
16059 || inst
.relocs
[0].exp
.X_add_number
!= 0
16060 || inst
.operands
[1].immisreg
!= 0,
16062 constraint (inst
.vectype
.el
[0].size
> 32, BAD_EL_TYPE
);
16063 if (inst
.operands
[1].reg
== REG_PC
)
16064 as_tsktsk (MVE_BAD_PC
);
16065 else if (inst
.operands
[1].reg
== REG_SP
&& inst
.operands
[1].writeback
)
16066 as_tsktsk (MVE_BAD_SP
);
16069 /* These instructions are one of the "exceptions" mentioned in
16070 handle_pred_state. They are MVE instructions that are not VPT compatible
16071 and do not accept a VPT code, thus appending such a code is a syntax
16073 if (inst
.cond
> COND_ALWAYS
)
16074 first_error (BAD_SYNTAX
);
16075 /* If we append a scalar condition code we can set this to
16076 MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error. */
16077 else if (inst
.cond
< COND_ALWAYS
)
16078 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16080 inst
.pred_insn_type
= MVE_UNPREDICABLE_INSN
;
16082 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16083 inst
.instruction
|= inst
.operands
[1].writeback
<< 21;
16084 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
16085 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16086 inst
.instruction
|= neon_logbits (inst
.vectype
.el
[0].size
) << 7;
16091 do_neon_dyadic_if_su (void)
16093 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
16094 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16097 if (check_simd_pred_availability (et
.type
== NT_float
,
16098 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16101 neon_dyadic_misc (NT_unsigned
, N_SUF_32
, 0);
16105 do_neon_addsub_if_i (void)
16107 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1xd
)
16108 && try_vfp_nsyn (3, do_vfp_nsyn_add_sub
) == SUCCESS
)
16111 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
16112 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
,
16113 N_EQK
, N_IF_32
| N_I64
| N_KEY
);
16115 constraint (rs
== NS_QQR
&& et
.size
== 64, BAD_FPU
);
16116 /* If we are parsing Q registers and the element types match MVE, which NEON
16117 also supports, then we must check whether this is an instruction that can
16118 be used by both MVE/NEON. This distinction can be made based on whether
16119 they are predicated or not. */
16120 if ((rs
== NS_QQQ
|| rs
== NS_QQR
) && et
.size
!= 64)
16122 if (check_simd_pred_availability (et
.type
== NT_float
,
16123 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16128 /* If they are either in a D register or are using an unsupported. */
16130 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16134 /* The "untyped" case can't happen. Do this to stop the "U" bit being
16135 affected if we specify unsigned args. */
16136 neon_dyadic_misc (NT_untyped
, N_IF_32
| N_I64
, 0);
16139 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
16141 V<op> A,B (A is operand 0, B is operand 2)
16146 so handle that case specially. */
16149 neon_exchange_operands (void)
16151 if (inst
.operands
[1].present
)
16153 void *scratch
= xmalloc (sizeof (inst
.operands
[0]));
16155 /* Swap operands[1] and operands[2]. */
16156 memcpy (scratch
, &inst
.operands
[1], sizeof (inst
.operands
[0]));
16157 inst
.operands
[1] = inst
.operands
[2];
16158 memcpy (&inst
.operands
[2], scratch
, sizeof (inst
.operands
[0]));
16163 inst
.operands
[1] = inst
.operands
[2];
16164 inst
.operands
[2] = inst
.operands
[0];
16169 neon_compare (unsigned regtypes
, unsigned immtypes
, int invert
)
16171 if (inst
.operands
[2].isreg
)
16174 neon_exchange_operands ();
16175 neon_dyadic_misc (NT_unsigned
, regtypes
, N_SIZ
);
16179 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16180 struct neon_type_el et
= neon_check_type (2, rs
,
16181 N_EQK
| N_SIZ
, immtypes
| N_KEY
);
16183 NEON_ENCODE (IMMED
, inst
);
16184 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16185 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16186 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16187 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16188 inst
.instruction
|= neon_quad (rs
) << 6;
16189 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16190 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16192 neon_dp_fixup (&inst
);
16199 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, FALSE
);
16203 do_neon_cmp_inv (void)
16205 neon_compare (N_SUF_32
, N_S_32
| N_F_16_32
, TRUE
);
16211 neon_compare (N_IF_32
, N_IF_32
, FALSE
);
16214 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
16215 scalars, which are encoded in 5 bits, M : Rm.
16216 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
16217 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
16220 Dot Product instructions are similar to multiply instructions except elsize
16221 should always be 32.
16223 This function translates SCALAR, which is GAS's internal encoding of indexed
16224 scalar register, to raw encoding. There is also register and index range
16225 check based on ELSIZE. */
/* Translate SCALAR, GAS's internal encoding of an indexed scalar register,
   to the raw M:Rm field for a multiply, validating the register and index
   against ELSIZE.  For 16-bit elements the register goes in Rm[2:0] and the
   index in M:Rm[3]; for 32-bit elements the register goes in Rm[3:0] and
   the index in M.  Returns 0 (after reporting an error) when out of range.  */
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
16253 /* Encode multiply / multiply-accumulate scalar instructions. */
16256 neon_mul_mac (struct neon_type_el et
, int ubit
)
16260 /* Give a more helpful error message if we have an invalid type. */
16261 if (et
.type
== NT_invtype
)
16264 scalar
= neon_scalar_for_mul (inst
.operands
[2].reg
, et
.size
);
16265 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16266 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16267 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16268 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16269 inst
.instruction
|= LOW4 (scalar
);
16270 inst
.instruction
|= HI1 (scalar
) << 5;
16271 inst
.instruction
|= (et
.type
== NT_float
) << 8;
16272 inst
.instruction
|= neon_logbits (et
.size
) << 20;
16273 inst
.instruction
|= (ubit
!= 0) << 24;
16275 neon_dp_fixup (&inst
);
16279 do_neon_mac_maybe_scalar (void)
16281 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls
) == SUCCESS
)
16284 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16287 if (inst
.operands
[2].isscalar
)
16289 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16290 struct neon_type_el et
= neon_check_type (3, rs
,
16291 N_EQK
, N_EQK
, N_I16
| N_I32
| N_F_16_32
| N_KEY
);
16292 NEON_ENCODE (SCALAR
, inst
);
16293 neon_mul_mac (et
, neon_quad (rs
));
16297 /* The "untyped" case can't happen. Do this to stop the "U" bit being
16298 affected if we specify unsigned args. */
16299 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
16304 do_neon_fmac (void)
16306 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms
) == SUCCESS
)
16309 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16312 neon_dyadic_misc (NT_untyped
, N_IF_32
, 0);
16318 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16319 struct neon_type_el et
= neon_check_type (3, rs
,
16320 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16321 neon_three_same (neon_quad (rs
), 0, et
.size
);
16324 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
16325 same types as the MAC equivalents. The polynomial type for this instruction
16326 is encoded the same as the integer type. */
16331 if (try_vfp_nsyn (3, do_vfp_nsyn_mul
) == SUCCESS
)
16334 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16337 if (inst
.operands
[2].isscalar
)
16338 do_neon_mac_maybe_scalar ();
16340 neon_dyadic_misc (NT_poly
, N_I8
| N_I16
| N_I32
| N_F16
| N_F32
| N_P8
, 0);
16344 do_neon_qdmulh (void)
16346 if (inst
.operands
[2].isscalar
)
16348 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16349 struct neon_type_el et
= neon_check_type (3, rs
,
16350 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16351 NEON_ENCODE (SCALAR
, inst
);
16352 neon_mul_mac (et
, neon_quad (rs
));
16356 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16357 struct neon_type_el et
= neon_check_type (3, rs
,
16358 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16359 NEON_ENCODE (INTEGER
, inst
);
16360 /* The U bit (rounding) comes from bit mask. */
16361 neon_three_same (neon_quad (rs
), 0, et
.size
);
16366 do_mve_vmull (void)
16369 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_DDS
,
16370 NS_QQS
, NS_QQQ
, NS_QQR
, NS_NULL
);
16371 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
16372 && inst
.cond
== COND_ALWAYS
16373 && ((unsigned)inst
.instruction
) == M_MNEM_vmullt
)
16378 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16379 N_SUF_32
| N_F64
| N_P8
16380 | N_P16
| N_I_MVE
| N_KEY
);
16381 if (((et
.type
== NT_poly
) && et
.size
== 8
16382 && ARM_CPU_IS_ANY (cpu_variant
))
16383 || (et
.type
== NT_integer
) || (et
.type
== NT_float
))
16390 constraint (rs
!= NS_QQQ
, BAD_FPU
);
16391 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16392 N_SU_32
| N_P8
| N_P16
| N_KEY
);
16394 /* We are dealing with MVE's vmullt. */
16396 && (inst
.operands
[0].reg
== inst
.operands
[1].reg
16397 || inst
.operands
[0].reg
== inst
.operands
[2].reg
))
16398 as_tsktsk (BAD_MVE_SRCDEST
);
16400 if (inst
.cond
> COND_ALWAYS
)
16401 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16403 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16405 if (et
.type
== NT_poly
)
16406 mve_encode_qqq (neon_logbits (et
.size
), 64);
16408 mve_encode_qqq (et
.type
== NT_unsigned
, et
.size
);
16413 inst
.instruction
= N_MNEM_vmul
;
16416 inst
.pred_insn_type
= INSIDE_IT_INSN
;
16421 do_mve_vabav (void)
16423 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16428 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
16431 struct neon_type_el et
= neon_check_type (2, NS_NULL
, N_EQK
, N_KEY
| N_S8
16432 | N_S16
| N_S32
| N_U8
| N_U16
16435 if (inst
.cond
> COND_ALWAYS
)
16436 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16438 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16440 mve_encode_rqq (et
.type
== NT_unsigned
, et
.size
);
16444 do_mve_vmladav (void)
16446 enum neon_shape rs
= neon_select_shape (NS_RQQ
, NS_NULL
);
16447 struct neon_type_el et
= neon_check_type (3, rs
,
16448 N_EQK
, N_EQK
, N_SU_MVE
| N_KEY
);
16450 if (et
.type
== NT_unsigned
16451 && (inst
.instruction
== M_MNEM_vmladavx
16452 || inst
.instruction
== M_MNEM_vmladavax
16453 || inst
.instruction
== M_MNEM_vmlsdav
16454 || inst
.instruction
== M_MNEM_vmlsdava
16455 || inst
.instruction
== M_MNEM_vmlsdavx
16456 || inst
.instruction
== M_MNEM_vmlsdavax
))
16457 first_error (BAD_SIMD_TYPE
);
16459 constraint (inst
.operands
[2].reg
> 14,
16460 _("MVE vector register in the range [Q0..Q7] expected"));
16462 if (inst
.cond
> COND_ALWAYS
)
16463 inst
.pred_insn_type
= INSIDE_VPT_INSN
;
16465 inst
.pred_insn_type
= MVE_OUTSIDE_PRED_INSN
;
16467 if (inst
.instruction
== M_MNEM_vmlsdav
16468 || inst
.instruction
== M_MNEM_vmlsdava
16469 || inst
.instruction
== M_MNEM_vmlsdavx
16470 || inst
.instruction
== M_MNEM_vmlsdavax
)
16471 inst
.instruction
|= (et
.size
== 8) << 28;
16473 inst
.instruction
|= (et
.size
== 8) << 8;
16475 mve_encode_rqq (et
.type
== NT_unsigned
, 64);
16476 inst
.instruction
|= (et
.size
== 32) << 16;
16480 do_neon_qrdmlah (void)
16482 /* Check we're on the correct architecture. */
16483 if (!mark_feature_used (&fpu_neon_ext_armv8
))
16485 _("instruction form not available on this architecture.");
16486 else if (!mark_feature_used (&fpu_neon_ext_v8_1
))
16488 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
16489 record_feature_use (&fpu_neon_ext_v8_1
);
16492 if (inst
.operands
[2].isscalar
)
16494 enum neon_shape rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
16495 struct neon_type_el et
= neon_check_type (3, rs
,
16496 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16497 NEON_ENCODE (SCALAR
, inst
);
16498 neon_mul_mac (et
, neon_quad (rs
));
16502 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16503 struct neon_type_el et
= neon_check_type (3, rs
,
16504 N_EQK
, N_EQK
, N_S16
| N_S32
| N_KEY
);
16505 NEON_ENCODE (INTEGER
, inst
);
16506 /* The U bit (rounding) comes from bit mask. */
16507 neon_three_same (neon_quad (rs
), 0, et
.size
);
16512 do_neon_fcmp_absolute (void)
16514 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16515 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16516 N_F_16_32
| N_KEY
);
16517 /* Size field comes from bit mask. */
16518 neon_three_same (neon_quad (rs
), 1, et
.size
== 16 ? (int) et
.size
: -1);
/* Inverted absolute comparison: swap the sources, then encode normally.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
16529 do_neon_step (void)
16531 enum neon_shape rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
16532 struct neon_type_el et
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
16533 N_F_16_32
| N_KEY
);
16534 neon_three_same (neon_quad (rs
), 0, et
.size
== 16 ? (int) et
.size
: -1);
16538 do_neon_abs_neg (void)
16540 enum neon_shape rs
;
16541 struct neon_type_el et
;
16543 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg
) == SUCCESS
)
16546 rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16547 et
= neon_check_type (2, rs
, N_EQK
, N_S_32
| N_F_16_32
| N_KEY
);
16549 if (check_simd_pred_availability (et
.type
== NT_float
,
16550 NEON_CHECK_ARCH
| NEON_CHECK_CC
))
16553 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16554 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16555 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16556 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16557 inst
.instruction
|= neon_quad (rs
) << 6;
16558 inst
.instruction
|= (et
.type
== NT_float
) << 10;
16559 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16561 neon_dp_fixup (&inst
);
16567 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16568 struct neon_type_el et
= neon_check_type (2, rs
,
16569 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16570 int imm
= inst
.operands
[2].imm
;
16571 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16572 _("immediate out of range for insert"));
16573 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16579 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16580 struct neon_type_el et
= neon_check_type (2, rs
,
16581 N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16582 int imm
= inst
.operands
[2].imm
;
16583 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16584 _("immediate out of range for insert"));
16585 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, et
.size
- imm
);
16589 do_neon_qshlu_imm (void)
16591 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
16592 struct neon_type_el et
= neon_check_type (2, rs
,
16593 N_EQK
| N_UNS
, N_S8
| N_S16
| N_S32
| N_S64
| N_KEY
);
16594 int imm
= inst
.operands
[2].imm
;
16595 constraint (imm
< 0 || (unsigned)imm
>= et
.size
,
16596 _("immediate out of range for shift"));
16597 /* Only encodes the 'U present' variant of the instruction.
16598 In this case, signed types have OP (bit 8) set to 0.
16599 Unsigned types have OP set to 1. */
16600 inst
.instruction
|= (et
.type
== NT_unsigned
) << 8;
16601 /* The rest of the bits are the same as other immediate shifts. */
16602 neon_imm_shift (FALSE
, 0, neon_quad (rs
), et
, imm
);
16606 do_neon_qmovn (void)
16608 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16609 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16610 /* Saturating move where operands can be signed or unsigned, and the
16611 destination has the same signedness. */
16612 NEON_ENCODE (INTEGER
, inst
);
16613 if (et
.type
== NT_unsigned
)
16614 inst
.instruction
|= 0xc0;
16616 inst
.instruction
|= 0x80;
16617 neon_two_same (0, 1, et
.size
/ 2);
16621 do_neon_qmovun (void)
16623 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16624 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16625 /* Saturating move with unsigned results. Operands must be signed. */
16626 NEON_ENCODE (INTEGER
, inst
);
16627 neon_two_same (0, 1, et
.size
/ 2);
16631 do_neon_rshift_sat_narrow (void)
16633 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16634 or unsigned. If operands are unsigned, results must also be unsigned. */
16635 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16636 N_EQK
| N_HLF
, N_SU_16_64
| N_KEY
);
16637 int imm
= inst
.operands
[2].imm
;
16638 /* This gets the bounds check, size encoding and immediate bits calculation
16642 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
16643 VQMOVN.I<size> <Dd>, <Qm>. */
16646 inst
.operands
[2].present
= 0;
16647 inst
.instruction
= N_MNEM_vqmovn
;
16652 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16653 _("immediate out of range"));
16654 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, et
.size
- imm
);
16658 do_neon_rshift_sat_narrow_u (void)
16660 /* FIXME: Types for narrowing. If operands are signed, results can be signed
16661 or unsigned. If operands are unsigned, results must also be unsigned. */
16662 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16663 N_EQK
| N_HLF
| N_UNS
, N_S16
| N_S32
| N_S64
| N_KEY
);
16664 int imm
= inst
.operands
[2].imm
;
16665 /* This gets the bounds check, size encoding and immediate bits calculation
16669 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
16670 VQMOVUN.I<size> <Dd>, <Qm>. */
16673 inst
.operands
[2].present
= 0;
16674 inst
.instruction
= N_MNEM_vqmovun
;
16679 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16680 _("immediate out of range"));
16681 /* FIXME: The manual is kind of unclear about what value U should have in
16682 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
16684 neon_imm_shift (TRUE
, 1, 0, et
, et
.size
- imm
);
16688 do_neon_movn (void)
16690 struct neon_type_el et
= neon_check_type (2, NS_DQ
,
16691 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16692 NEON_ENCODE (INTEGER
, inst
);
16693 neon_two_same (0, 1, et
.size
/ 2);
16697 do_neon_rshift_narrow (void)
16699 struct neon_type_el et
= neon_check_type (2, NS_DQI
,
16700 N_EQK
| N_HLF
, N_I16
| N_I32
| N_I64
| N_KEY
);
16701 int imm
= inst
.operands
[2].imm
;
16702 /* This gets the bounds check, size encoding and immediate bits calculation
16706 /* If immediate is zero then we are a pseudo-instruction for
16707 VMOVN.I<size> <Dd>, <Qm> */
16710 inst
.operands
[2].present
= 0;
16711 inst
.instruction
= N_MNEM_vmovn
;
16716 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
16717 _("immediate out of range for narrowing operation"));
16718 neon_imm_shift (FALSE
, 0, 0, et
, et
.size
- imm
);
16722 do_neon_shll (void)
16724 /* FIXME: Type checking when lengthening. */
16725 struct neon_type_el et
= neon_check_type (2, NS_QDI
,
16726 N_EQK
| N_DBL
, N_I8
| N_I16
| N_I32
| N_KEY
);
16727 unsigned imm
= inst
.operands
[2].imm
;
16729 if (imm
== et
.size
)
16731 /* Maximum shift variant. */
16732 NEON_ENCODE (INTEGER
, inst
);
16733 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16734 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16735 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16736 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16737 inst
.instruction
|= neon_logbits (et
.size
) << 18;
16739 neon_dp_fixup (&inst
);
16743 /* A more-specific type check for non-max versions. */
16744 et
= neon_check_type (2, NS_QDI
,
16745 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16746 NEON_ENCODE (IMMED
, inst
);
16747 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, 0, et
, imm
);
16751 /* Check the various types for the VCVT instruction, and return which version
16752 the current instruction is. */
16754 #define CVT_FLAVOUR_VAR \
16755 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
16756 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
16757 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
16758 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
16759 /* Half-precision conversions. */ \
16760 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16761 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
16762 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
16763 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
16764 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
16765 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
16766 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
16767 Compared with single/double precision variants, only the co-processor \
16768 field is different, so the encoding flow is reused here. */ \
16769 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
16770 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
16771 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
16772 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
16773 /* VFP instructions. */ \
16774 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
16775 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
16776 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
16777 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
16778 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
16779 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
16780 /* VFP instructions with bitshift. */ \
16781 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
16782 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
16783 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
16784 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
16785 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
16786 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
16787 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
16788 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
16790 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
16791 neon_cvt_flavour_##C,
16793 /* The different types of conversions we can do. */
16794 enum neon_cvt_flavour
16797 neon_cvt_flavour_invalid
,
16798 neon_cvt_flavour_first_fp
= neon_cvt_flavour_f32_f64
16803 static enum neon_cvt_flavour
16804 get_neon_cvt_flavour (enum neon_shape rs
)
16806 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
16807 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
16808 if (et.type != NT_invtype) \
16810 inst.error = NULL; \
16811 return (neon_cvt_flavour_##C); \
16814 struct neon_type_el et
;
16815 unsigned whole_reg
= (rs
== NS_FFI
|| rs
== NS_FD
|| rs
== NS_DF
16816 || rs
== NS_FF
) ? N_VFP
: 0;
16817 /* The instruction versions which take an immediate take one register
16818 argument, which is extended to the width of the full register. Thus the
16819 "source" and "destination" registers must have the same width. Hack that
16820 here by making the size equal to the key (wider, in this case) operand. */
16821 unsigned key
= (rs
== NS_QQI
|| rs
== NS_DDI
|| rs
== NS_FFI
) ? N_KEY
: 0;
16825 return neon_cvt_flavour_invalid
;
16840 /* Neon-syntax VFP conversions. */
16843 do_vfp_nsyn_cvt (enum neon_shape rs
, enum neon_cvt_flavour flavour
)
16845 const char *opname
= 0;
16847 if (rs
== NS_DDI
|| rs
== NS_QQI
|| rs
== NS_FFI
16848 || rs
== NS_FHI
|| rs
== NS_HFI
)
16850 /* Conversions with immediate bitshift. */
16851 const char *enc
[] =
16853 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
16859 if (flavour
< (int) ARRAY_SIZE (enc
))
16861 opname
= enc
[flavour
];
16862 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
16863 _("operands 0 and 1 must be the same register"));
16864 inst
.operands
[1] = inst
.operands
[2];
16865 memset (&inst
.operands
[2], '\0', sizeof (inst
.operands
[2]));
16870 /* Conversions without bitshift. */
16871 const char *enc
[] =
16873 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
16879 if (flavour
< (int) ARRAY_SIZE (enc
))
16880 opname
= enc
[flavour
];
16884 do_vfp_nsyn_opcode (opname
);
16886 /* ARMv8.2 fp16 VCVT instruction. */
16887 if (flavour
== neon_cvt_flavour_s32_f16
16888 || flavour
== neon_cvt_flavour_u32_f16
16889 || flavour
== neon_cvt_flavour_f16_u32
16890 || flavour
== neon_cvt_flavour_f16_s32
)
16891 do_scalar_fp16_v82_encode ();
16895 do_vfp_nsyn_cvtz (void)
16897 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16898 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16899 const char *enc
[] =
16901 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16907 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16908 do_vfp_nsyn_opcode (enc
[flavour
]);
16912 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16913 enum neon_cvt_mode mode
)
16918 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16919 D register operands. */
16920 if (flavour
== neon_cvt_flavour_s32_f64
16921 || flavour
== neon_cvt_flavour_u32_f64
)
16922 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16925 if (flavour
== neon_cvt_flavour_s32_f16
16926 || flavour
== neon_cvt_flavour_u32_f16
)
16927 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16930 set_pred_insn_type (OUTSIDE_PRED_INSN
);
16934 case neon_cvt_flavour_s32_f64
:
16938 case neon_cvt_flavour_s32_f32
:
16942 case neon_cvt_flavour_s32_f16
:
16946 case neon_cvt_flavour_u32_f64
:
16950 case neon_cvt_flavour_u32_f32
:
16954 case neon_cvt_flavour_u32_f16
:
16959 first_error (_("invalid instruction shape"));
16965 case neon_cvt_mode_a
: rm
= 0; break;
16966 case neon_cvt_mode_n
: rm
= 1; break;
16967 case neon_cvt_mode_p
: rm
= 2; break;
16968 case neon_cvt_mode_m
: rm
= 3; break;
16969 default: first_error (_("invalid rounding mode")); return;
16972 NEON_ENCODE (FPV8
, inst
);
16973 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16974 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16975 inst
.instruction
|= sz
<< 8;
16977 /* ARMv8.2 fp16 VCVT instruction. */
16978 if (flavour
== neon_cvt_flavour_s32_f16
16979 ||flavour
== neon_cvt_flavour_u32_f16
)
16980 do_scalar_fp16_v82_encode ();
16981 inst
.instruction
|= op
<< 7;
16982 inst
.instruction
|= rm
<< 16;
16983 inst
.instruction
|= 0xf0000000;
16984 inst
.is_neon
= TRUE
;
16988 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16990 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16991 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16992 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16994 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16996 if (flavour
== neon_cvt_flavour_invalid
)
16999 /* PR11109: Handle round-to-zero for VCVT conversions. */
17000 if (mode
== neon_cvt_mode_z
17001 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
17002 && (flavour
== neon_cvt_flavour_s16_f16
17003 || flavour
== neon_cvt_flavour_u16_f16
17004 || flavour
== neon_cvt_flavour_s32_f32
17005 || flavour
== neon_cvt_flavour_u32_f32
17006 || flavour
== neon_cvt_flavour_s32_f64
17007 || flavour
== neon_cvt_flavour_u32_f64
)
17008 && (rs
== NS_FD
|| rs
== NS_FF
))
17010 do_vfp_nsyn_cvtz ();
17014 /* ARMv8.2 fp16 VCVT conversions. */
17015 if (mode
== neon_cvt_mode_z
17016 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
17017 && (flavour
== neon_cvt_flavour_s32_f16
17018 || flavour
== neon_cvt_flavour_u32_f16
)
17021 do_vfp_nsyn_cvtz ();
17022 do_scalar_fp16_v82_encode ();
17026 /* VFP rather than Neon conversions. */
17027 if (flavour
>= neon_cvt_flavour_first_fp
)
17029 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
17030 do_vfp_nsyn_cvt (rs
, flavour
);
17032 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
17040 if (mode
== neon_cvt_mode_z
17041 && (flavour
== neon_cvt_flavour_f16_s16
17042 || flavour
== neon_cvt_flavour_f16_u16
17043 || flavour
== neon_cvt_flavour_s16_f16
17044 || flavour
== neon_cvt_flavour_u16_f16
17045 || flavour
== neon_cvt_flavour_f32_u32
17046 || flavour
== neon_cvt_flavour_f32_s32
17047 || flavour
== neon_cvt_flavour_s32_f32
17048 || flavour
== neon_cvt_flavour_u32_f32
))
17050 if (check_simd_pred_availability (1, NEON_CHECK_CC
| NEON_CHECK_ARCH
))
17053 else if (mode
== neon_cvt_mode_n
)
17055 /* We are dealing with vcvt with the 'ne' condition. */
17057 inst
.instruction
= N_MNEM_vcvt
;
17058 do_neon_cvt_1 (neon_cvt_mode_z
);
17061 /* fall through. */
17065 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
17066 0x0000100, 0x1000100, 0x0, 0x1000000};
17068 if ((rs
!= NS_QQI
|| !ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
17069 && vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17072 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
17074 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0,
17075 _("immediate value out of range"));
17078 case neon_cvt_flavour_f16_s16
:
17079 case neon_cvt_flavour_f16_u16
:
17080 case neon_cvt_flavour_s16_f16
:
17081 case neon_cvt_flavour_u16_f16
:
17082 constraint (inst
.operands
[2].imm
> 16,
17083 _("immediate value out of range"));
17085 case neon_cvt_flavour_f32_u32
:
17086 case neon_cvt_flavour_f32_s32
:
17087 case neon_cvt_flavour_s32_f32
:
17088 case neon_cvt_flavour_u32_f32
:
17089 constraint (inst
.operands
[2].imm
> 32,
17090 _("immediate value out of range"));
17093 inst
.error
= BAD_FPU
;
17098 /* Fixed-point conversion with #0 immediate is encoded as an
17099 integer conversion. */
17100 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
17102 NEON_ENCODE (IMMED
, inst
);
17103 if (flavour
!= neon_cvt_flavour_invalid
)
17104 inst
.instruction
|= enctab
[flavour
];
17105 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17106 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17107 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17108 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17109 inst
.instruction
|= neon_quad (rs
) << 6;
17110 inst
.instruction
|= 1 << 21;
17111 if (flavour
< neon_cvt_flavour_s16_f16
)
17113 inst
.instruction
|= 1 << 21;
17114 immbits
= 32 - inst
.operands
[2].imm
;
17115 inst
.instruction
|= immbits
<< 16;
17119 inst
.instruction
|= 3 << 20;
17120 immbits
= 16 - inst
.operands
[2].imm
;
17121 inst
.instruction
|= immbits
<< 16;
17122 inst
.instruction
&= ~(1 << 9);
17125 neon_dp_fixup (&inst
);
17130 if ((mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17131 || mode
== neon_cvt_mode_m
|| mode
== neon_cvt_mode_p
)
17132 && (flavour
== neon_cvt_flavour_s16_f16
17133 || flavour
== neon_cvt_flavour_u16_f16
17134 || flavour
== neon_cvt_flavour_s32_f32
17135 || flavour
== neon_cvt_flavour_u32_f32
))
17137 if (check_simd_pred_availability (1,
17138 NEON_CHECK_CC
| NEON_CHECK_ARCH8
))
17141 else if (mode
== neon_cvt_mode_z
17142 && (flavour
== neon_cvt_flavour_f16_s16
17143 || flavour
== neon_cvt_flavour_f16_u16
17144 || flavour
== neon_cvt_flavour_s16_f16
17145 || flavour
== neon_cvt_flavour_u16_f16
17146 || flavour
== neon_cvt_flavour_f32_u32
17147 || flavour
== neon_cvt_flavour_f32_s32
17148 || flavour
== neon_cvt_flavour_s32_f32
17149 || flavour
== neon_cvt_flavour_u32_f32
))
17151 if (check_simd_pred_availability (1,
17152 NEON_CHECK_CC
| NEON_CHECK_ARCH
))
17155 /* fall through. */
17157 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
17160 NEON_ENCODE (FLOAT
, inst
);
17161 if (check_simd_pred_availability (1,
17162 NEON_CHECK_CC
| NEON_CHECK_ARCH8
))
17165 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17166 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17167 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17168 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17169 inst
.instruction
|= neon_quad (rs
) << 6;
17170 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
17171 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
17172 inst
.instruction
|= mode
<< 8;
17173 if (flavour
== neon_cvt_flavour_u16_f16
17174 || flavour
== neon_cvt_flavour_s16_f16
)
17175 /* Mask off the original size bits and reencode them. */
17176 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
17179 inst
.instruction
|= 0xfc000000;
17181 inst
.instruction
|= 0xf0000000;
17187 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
17188 0x100, 0x180, 0x0, 0x080};
17190 NEON_ENCODE (INTEGER
, inst
);
17192 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, mve_fp_ext
))
17194 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17198 if (flavour
!= neon_cvt_flavour_invalid
)
17199 inst
.instruction
|= enctab
[flavour
];
17201 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17202 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17203 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17204 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17205 inst
.instruction
|= neon_quad (rs
) << 6;
17206 if (flavour
>= neon_cvt_flavour_s16_f16
17207 && flavour
<= neon_cvt_flavour_f16_u16
)
17208 /* Half precision. */
17209 inst
.instruction
|= 1 << 18;
17211 inst
.instruction
|= 2 << 18;
17213 neon_dp_fixup (&inst
);
17218 /* Half-precision conversions for Advanced SIMD -- neon. */
17221 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17225 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
17227 as_bad (_("operand size must match register width"));
17232 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
17234 as_bad (_("operand size must match register width"));
17239 inst
.instruction
= 0x3b60600;
17241 inst
.instruction
= 0x3b60700;
17243 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17244 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17245 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17246 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17247 neon_dp_fixup (&inst
);
17251 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
17252 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
17253 do_vfp_nsyn_cvt (rs
, flavour
);
17255 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
17260 do_neon_cvtr (void)
17262 do_neon_cvt_1 (neon_cvt_mode_x
);
17268 do_neon_cvt_1 (neon_cvt_mode_z
);
17272 do_neon_cvta (void)
17274 do_neon_cvt_1 (neon_cvt_mode_a
);
17278 do_neon_cvtn (void)
17280 do_neon_cvt_1 (neon_cvt_mode_n
);
17284 do_neon_cvtp (void)
17286 do_neon_cvt_1 (neon_cvt_mode_p
);
17290 do_neon_cvtm (void)
17292 do_neon_cvt_1 (neon_cvt_mode_m
);
17296 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
17299 mark_feature_used (&fpu_vfp_ext_armv8
);
17301 encode_arm_vfp_reg (inst
.operands
[0].reg
,
17302 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
17303 encode_arm_vfp_reg (inst
.operands
[1].reg
,
17304 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
17305 inst
.instruction
|= to
? 0x10000 : 0;
17306 inst
.instruction
|= t
? 0x80 : 0;
17307 inst
.instruction
|= is_double
? 0x100 : 0;
17308 do_vfp_cond_or_thumb ();
17312 do_neon_cvttb_1 (bfd_boolean t
)
17314 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
17315 NS_DF
, NS_DH
, NS_QQ
, NS_QQI
, NS_NULL
);
17319 else if (rs
== NS_QQ
|| rs
== NS_QQI
)
17321 int single_to_half
= 0;
17322 if (check_simd_pred_availability (1, NEON_CHECK_ARCH
))
17325 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
17327 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
17328 && (flavour
== neon_cvt_flavour_u16_f16
17329 || flavour
== neon_cvt_flavour_s16_f16
17330 || flavour
== neon_cvt_flavour_f16_s16
17331 || flavour
== neon_cvt_flavour_f16_u16
17332 || flavour
== neon_cvt_flavour_u32_f32
17333 || flavour
== neon_cvt_flavour_s32_f32
17334 || flavour
== neon_cvt_flavour_f32_s32
17335 || flavour
== neon_cvt_flavour_f32_u32
))
17338 inst
.instruction
= N_MNEM_vcvt
;
17339 set_pred_insn_type (INSIDE_VPT_INSN
);
17340 do_neon_cvt_1 (neon_cvt_mode_z
);
17343 else if (rs
== NS_QQ
&& flavour
== neon_cvt_flavour_f32_f16
)
17344 single_to_half
= 1;
17345 else if (rs
== NS_QQ
&& flavour
!= neon_cvt_flavour_f16_f32
)
17347 first_error (BAD_FPU
);
17351 inst
.instruction
= 0xee3f0e01;
17352 inst
.instruction
|= single_to_half
<< 28;
17353 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17354 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 13;
17355 inst
.instruction
|= t
<< 12;
17356 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17357 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 1;
17360 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
17363 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
17365 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
17368 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
17370 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
17372 /* The VCVTB and VCVTT instructions with D-register operands
17373 don't work for SP only targets. */
17374 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17378 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
17380 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
17382 /* The VCVTB and VCVTT instructions with D-register operands
17383 don't work for SP only targets. */
17384 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17388 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
17395 do_neon_cvtb (void)
17397 do_neon_cvttb_1 (FALSE
);
17402 do_neon_cvtt (void)
17404 do_neon_cvttb_1 (TRUE
);
17408 neon_move_immediate (void)
17410 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
17411 struct neon_type_el et
= neon_check_type (2, rs
,
17412 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
17413 unsigned immlo
, immhi
= 0, immbits
;
17414 int op
, cmode
, float_p
;
17416 constraint (et
.type
== NT_invtype
,
17417 _("operand size must be specified for immediate VMOV"));
17419 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
17420 op
= (inst
.instruction
& (1 << 5)) != 0;
17422 immlo
= inst
.operands
[1].imm
;
17423 if (inst
.operands
[1].regisimm
)
17424 immhi
= inst
.operands
[1].reg
;
17426 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
17427 _("immediate has bits set outside the operand size"));
17429 float_p
= inst
.operands
[1].immisfloat
;
17431 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
17432 et
.size
, et
.type
)) == FAIL
)
17434 /* Invert relevant bits only. */
17435 neon_invert_size (&immlo
, &immhi
, et
.size
);
17436 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
17437 with one or the other; those cases are caught by
17438 neon_cmode_for_move_imm. */
17440 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
17441 &op
, et
.size
, et
.type
)) == FAIL
)
17443 first_error (_("immediate out of range"));
17448 inst
.instruction
&= ~(1 << 5);
17449 inst
.instruction
|= op
<< 5;
17451 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17452 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17453 inst
.instruction
|= neon_quad (rs
) << 6;
17454 inst
.instruction
|= cmode
<< 8;
17456 neon_write_immbits (immbits
);
17462 if (inst
.operands
[1].isreg
)
17464 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17466 NEON_ENCODE (INTEGER
, inst
);
17467 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17468 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17469 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17470 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17471 inst
.instruction
|= neon_quad (rs
) << 6;
17475 NEON_ENCODE (IMMED
, inst
);
17476 neon_move_immediate ();
17479 neon_dp_fixup (&inst
);
17482 /* Encode instructions of form:
17484 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
17485 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
17488 neon_mixed_length (struct neon_type_el et
, unsigned size
)
17490 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17491 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17492 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17493 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17494 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17495 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17496 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
17497 inst
.instruction
|= neon_logbits (size
) << 20;
17499 neon_dp_fixup (&inst
);
17503 do_neon_dyadic_long (void)
17505 enum neon_shape rs
= neon_select_shape (NS_QDD
, NS_QQQ
, NS_QQR
, NS_NULL
);
17508 if (vfp_or_neon_is_neon (NEON_CHECK_ARCH
| NEON_CHECK_CC
) == FAIL
)
17511 NEON_ENCODE (INTEGER
, inst
);
17512 /* FIXME: Type checking for lengthening op. */
17513 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17514 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17515 neon_mixed_length (et
, et
.size
);
17517 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
)
17518 && (inst
.cond
== 0xf || inst
.cond
== 0x10))
17520 /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
17521 in an IT block with le/lt conditions. */
17523 if (inst
.cond
== 0xf)
17525 else if (inst
.cond
== 0x10)
17528 inst
.pred_insn_type
= INSIDE_IT_INSN
;
17530 if (inst
.instruction
== N_MNEM_vaddl
)
17532 inst
.instruction
= N_MNEM_vadd
;
17533 do_neon_addsub_if_i ();
17535 else if (inst
.instruction
== N_MNEM_vsubl
)
17537 inst
.instruction
= N_MNEM_vsub
;
17538 do_neon_addsub_if_i ();
17540 else if (inst
.instruction
== N_MNEM_vabdl
)
17542 inst
.instruction
= N_MNEM_vabd
;
17543 do_neon_dyadic_if_su ();
17547 first_error (BAD_FPU
);
17551 do_neon_abal (void)
17553 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17554 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
17555 neon_mixed_length (et
, et
.size
);
17559 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
17561 if (inst
.operands
[2].isscalar
)
17563 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
17564 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
17565 NEON_ENCODE (SCALAR
, inst
);
17566 neon_mul_mac (et
, et
.type
== NT_unsigned
);
17570 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17571 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
17572 NEON_ENCODE (INTEGER
, inst
);
17573 neon_mixed_length (et
, et
.size
);
17578 do_neon_mac_maybe_scalar_long (void)
17580 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
/* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.  */

static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: scalar register 0-7, element index 0-3.  */
      if (regno > 7 || elno > 3)
	goto bad_scalar;

      return ((regno & 0x7)
	      | ((elno & 0x1) << 3)
	      | (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      /* D form: scalar register 0-15, element index 0-1.  */
      if (regno > 15 || elno > 1)
	goto bad_scalar;

      return (((regno & 0x1) << 5)
	      | ((regno >> 1) & 0x7)
	      | ((elno & 0x1) << 3));
    }

bad_scalar:
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17617 do_neon_fmac_maybe_scalar_long (int subtype
)
17619 enum neon_shape rs
;
17621 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
17622 field (bits[21:20]) has different meaning. For scalar index variant, it's
17623 used to differentiate add and subtract, otherwise it's with fixed value
17627 if (inst
.cond
!= COND_ALWAYS
)
17628 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
17629 "behaviour is UNPREDICTABLE"));
17631 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
17634 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
17637 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
17638 be a scalar index register. */
17639 if (inst
.operands
[2].isscalar
)
17641 high8
= 0xfe000000;
17644 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
17648 high8
= 0xfc000000;
17651 inst
.instruction
|= (0x1 << 23);
17652 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
17655 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
17657 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
17658 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
17659 so we simply pass -1 as size. */
17660 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
17661 neon_three_same (quad_p
, 0, size
);
17663 /* Undo neon_dp_fixup. Redo the high eight bits. */
17664 inst
.instruction
&= 0x00ffffff;
17665 inst
.instruction
|= high8
;
17667 #define LOW1(R) ((R) & 0x1)
17668 #define HI4(R) (((R) >> 1) & 0xf)
17669 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
17670 whether the instruction is in Q form and whether Vm is a scalar indexed
17672 if (inst
.operands
[2].isscalar
)
17675 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
17676 inst
.instruction
&= 0xffffffd0;
17677 inst
.instruction
|= rm
;
17681 /* Redo Rn as well. */
17682 inst
.instruction
&= 0xfff0ff7f;
17683 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17684 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17689 /* Redo Rn and Rm. */
17690 inst
.instruction
&= 0xfff0ff50;
17691 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
17692 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
17693 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
17694 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
/* VFMAL: fused multiply-accumulate long (add variant).  */
static void
do_neon_vfmal (void)
{
  return do_neon_fmac_maybe_scalar_long (0);
}
/* VFMSL: fused multiply-accumulate long (subtract variant).  */
static void
do_neon_vfmsl (void)
{
  return do_neon_fmac_maybe_scalar_long (1);
}
17711 do_neon_dyadic_wide (void)
17713 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
17714 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17715 neon_mixed_length (et
, et
.size
);
17719 do_neon_dyadic_narrow (void)
17721 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17722 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
17723 /* Operand sign is unimportant, and the U bit is part of the opcode,
17724 so force the operand type to integer. */
17725 et
.type
= NT_integer
;
17726 neon_mixed_length (et
, et
.size
/ 2);
17730 do_neon_mul_sat_scalar_long (void)
17732 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
17736 do_neon_vmull (void)
17738 if (inst
.operands
[2].isscalar
)
17739 do_neon_mac_maybe_scalar_long ();
17742 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
17743 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
17745 if (et
.type
== NT_poly
)
17746 NEON_ENCODE (POLY
, inst
);
17748 NEON_ENCODE (INTEGER
, inst
);
17750 /* For polynomial encoding the U bit must be zero, and the size must
17751 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
17752 obviously, as 0b10). */
17755 /* Check we're on the correct architecture. */
17756 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
17758 _("Instruction form not available on this architecture.");
17763 neon_mixed_length (et
, et
.size
);
17770 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
17771 struct neon_type_el et
= neon_check_type (3, rs
,
17772 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
17773 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
17775 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
17776 _("shift out of range"));
17777 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17778 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17779 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17780 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17781 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17782 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17783 inst
.instruction
|= neon_quad (rs
) << 6;
17784 inst
.instruction
|= imm
<< 8;
17786 neon_dp_fixup (&inst
);
17792 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17793 struct neon_type_el et
= neon_check_type (2, rs
,
17794 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17795 unsigned op
= (inst
.instruction
>> 7) & 3;
17796 /* N (width of reversed regions) is encoded as part of the bitmask. We
17797 extract it here to check the elements to be reversed are smaller.
17798 Otherwise we'd get a reserved instruction. */
17799 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
17800 gas_assert (elsize
!= 0);
17801 constraint (et
.size
>= elsize
,
17802 _("elements must be smaller than reversal region"));
17803 neon_two_same (neon_quad (rs
), 1, et
.size
);
17809 if (inst
.operands
[1].isscalar
)
17811 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
17812 struct neon_type_el et
= neon_check_type (2, rs
,
17813 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17814 unsigned sizebits
= et
.size
>> 3;
17815 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17816 int logsize
= neon_logbits (et
.size
);
17817 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
17819 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
17822 NEON_ENCODE (SCALAR
, inst
);
17823 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17824 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17825 inst
.instruction
|= LOW4 (dm
);
17826 inst
.instruction
|= HI1 (dm
) << 5;
17827 inst
.instruction
|= neon_quad (rs
) << 6;
17828 inst
.instruction
|= x
<< 17;
17829 inst
.instruction
|= sizebits
<< 16;
17831 neon_dp_fixup (&inst
);
17835 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
17836 struct neon_type_el et
= neon_check_type (2, rs
,
17837 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17838 /* Duplicate ARM register to lanes of vector. */
17839 NEON_ENCODE (ARMREG
, inst
);
17842 case 8: inst
.instruction
|= 0x400000; break;
17843 case 16: inst
.instruction
|= 0x000020; break;
17844 case 32: inst
.instruction
|= 0x000000; break;
17847 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17848 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
17849 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
17850 inst
.instruction
|= neon_quad (rs
) << 21;
17851 /* The encoding for this instruction is identical for the ARM and Thumb
17852 variants, except for the condition field. */
17853 do_vfp_cond_or_thumb ();
17857 /* VMOV has particularly many variations. It can be one of:
17858 0. VMOV<c><q> <Qd>, <Qm>
17859 1. VMOV<c><q> <Dd>, <Dm>
17860 (Register operations, which are VORR with Rm = Rn.)
17861 2. VMOV<c><q>.<dt> <Qd>, #<imm>
17862 3. VMOV<c><q>.<dt> <Dd>, #<imm>
17864 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
17865 (ARM register to scalar.)
17866 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
17867 (Two ARM registers to vector.)
17868 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
17869 (Scalar to ARM register.)
17870 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
17871 (Vector to two ARM registers.)
17872 8. VMOV.F32 <Sd>, <Sm>
17873 9. VMOV.F64 <Dd>, <Dm>
17874 (VFP register moves.)
17875 10. VMOV.F32 <Sd>, #imm
17876 11. VMOV.F64 <Dd>, #imm
17877 (VFP float immediate load.)
17878 12. VMOV <Rd>, <Sm>
17879 (VFP single to ARM reg.)
17880 13. VMOV <Sd>, <Rm>
17881 (ARM reg to VFP single.)
17882 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
17883 (Two ARM regs to two VFP singles.)
17884 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
17885 (Two VFP singles to two ARM regs.)
17887 These cases can be disambiguated using neon_select_shape, except cases 1/9
17888 and 3/11 which depend on the operand type too.
17890 All the encoded bits are hardcoded by this function.
17892 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
17893 Cases 5, 7 may be used with VFPv2 and above.
17895 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
17896 can specify a type where it doesn't make sense to, and is ignored). */
17901 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
17902 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
17903 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
17904 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
17905 struct neon_type_el et
;
17906 const char *ldconst
= 0;
17910 case NS_DD
: /* case 1/9. */
17911 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17912 /* It is not an error here if no type is given. */
17914 if (et
.type
== NT_float
&& et
.size
== 64)
17916 do_vfp_nsyn_opcode ("fcpyd");
17919 /* fall through. */
17921 case NS_QQ
: /* case 0/1. */
17923 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17925 /* The architecture manual I have doesn't explicitly state which
17926 value the U bit should have for register->register moves, but
17927 the equivalent VORR instruction has U = 0, so do that. */
17928 inst
.instruction
= 0x0200110;
17929 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17930 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17931 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17932 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17933 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17934 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17935 inst
.instruction
|= neon_quad (rs
) << 6;
17937 neon_dp_fixup (&inst
);
17941 case NS_DI
: /* case 3/11. */
17942 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
17944 if (et
.type
== NT_float
&& et
.size
== 64)
17946 /* case 11 (fconstd). */
17947 ldconst
= "fconstd";
17948 goto encode_fconstd
;
17950 /* fall through. */
17952 case NS_QI
: /* case 2/3. */
17953 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
17955 inst
.instruction
= 0x0800010;
17956 neon_move_immediate ();
17957 neon_dp_fixup (&inst
);
17960 case NS_SR
: /* case 4. */
17962 unsigned bcdebits
= 0;
17964 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17965 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17967 /* .<size> is optional here, defaulting to .32. */
17968 if (inst
.vectype
.elems
== 0
17969 && inst
.operands
[0].vectype
.type
== NT_invtype
17970 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17972 inst
.vectype
.el
[0].type
= NT_untyped
;
17973 inst
.vectype
.el
[0].size
= 32;
17974 inst
.vectype
.elems
= 1;
17977 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17978 logsize
= neon_logbits (et
.size
);
17980 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17982 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17983 && et
.size
!= 32, _(BAD_FPU
));
17984 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17985 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17989 case 8: bcdebits
= 0x8; break;
17990 case 16: bcdebits
= 0x1; break;
17991 case 32: bcdebits
= 0x0; break;
17995 bcdebits
|= x
<< logsize
;
17997 inst
.instruction
= 0xe000b10;
17998 do_vfp_cond_or_thumb ();
17999 inst
.instruction
|= LOW4 (dn
) << 16;
18000 inst
.instruction
|= HI1 (dn
) << 7;
18001 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
18002 inst
.instruction
|= (bcdebits
& 3) << 5;
18003 inst
.instruction
|= (bcdebits
>> 2) << 21;
18007 case NS_DRR
: /* case 5 (fmdrr). */
18008 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
18011 inst
.instruction
= 0xc400b10;
18012 do_vfp_cond_or_thumb ();
18013 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
18014 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
18015 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
18016 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
18019 case NS_RS
: /* case 6. */
18022 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
18023 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
18024 unsigned abcdebits
= 0;
18026 /* .<dt> is optional here, defaulting to .32. */
18027 if (inst
.vectype
.elems
== 0
18028 && inst
.operands
[0].vectype
.type
== NT_invtype
18029 && inst
.operands
[1].vectype
.type
== NT_invtype
)
18031 inst
.vectype
.el
[0].type
= NT_untyped
;
18032 inst
.vectype
.el
[0].size
= 32;
18033 inst
.vectype
.elems
= 1;
18036 et
= neon_check_type (2, NS_NULL
,
18037 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
18038 logsize
= neon_logbits (et
.size
);
18040 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
18042 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
18043 && et
.size
!= 32, _(BAD_FPU
));
18044 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
18045 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
18049 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
18050 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
18051 case 32: abcdebits
= 0x00; break;
18055 abcdebits
|= x
<< logsize
;
18056 inst
.instruction
= 0xe100b10;
18057 do_vfp_cond_or_thumb ();
18058 inst
.instruction
|= LOW4 (dn
) << 16;
18059 inst
.instruction
|= HI1 (dn
) << 7;
18060 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
18061 inst
.instruction
|= (abcdebits
& 3) << 5;
18062 inst
.instruction
|= (abcdebits
>> 2) << 21;
18066 case NS_RRD
: /* case 7 (fmrrd). */
18067 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
18070 inst
.instruction
= 0xc500b10;
18071 do_vfp_cond_or_thumb ();
18072 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
18073 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
18074 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
18075 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
18078 case NS_FF
: /* case 8 (fcpys). */
18079 do_vfp_nsyn_opcode ("fcpys");
18083 case NS_FI
: /* case 10 (fconsts). */
18084 ldconst
= "fconsts";
18086 if (!inst
.operands
[1].immisfloat
)
18089 /* Immediate has to fit in 8 bits so float is enough. */
18090 float imm
= (float) inst
.operands
[1].imm
;
18091 memcpy (&new_imm
, &imm
, sizeof (float));
18092 /* But the assembly may have been written to provide an integer
18093 bit pattern that equates to a float, so check that the
18094 conversion has worked. */
18095 if (is_quarter_float (new_imm
))
18097 if (is_quarter_float (inst
.operands
[1].imm
))
18098 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
18100 inst
.operands
[1].imm
= new_imm
;
18101 inst
.operands
[1].immisfloat
= 1;
18105 if (is_quarter_float (inst
.operands
[1].imm
))
18107 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
18108 do_vfp_nsyn_opcode (ldconst
);
18110 /* ARMv8.2 fp16 vmov.f16 instruction. */
18112 do_scalar_fp16_v82_encode ();
18115 first_error (_("immediate out of range"));
18119 case NS_RF
: /* case 12 (fmrs). */
18120 do_vfp_nsyn_opcode ("fmrs");
18121 /* ARMv8.2 fp16 vmov.f16 instruction. */
18123 do_scalar_fp16_v82_encode ();
18127 case NS_FR
: /* case 13 (fmsr). */
18128 do_vfp_nsyn_opcode ("fmsr");
18129 /* ARMv8.2 fp16 vmov.f16 instruction. */
18131 do_scalar_fp16_v82_encode ();
18134 /* The encoders for the fmrrs and fmsrr instructions expect three operands
18135 (one of which is a list), but we have parsed four. Do some fiddling to
18136 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
18138 case NS_RRFF
: /* case 14 (fmrrs). */
18139 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
18140 _("VFP registers must be adjacent"));
18141 inst
.operands
[2].imm
= 2;
18142 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
18143 do_vfp_nsyn_opcode ("fmrrs");
18146 case NS_FFRR
: /* case 15 (fmsrr). */
18147 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
18148 _("VFP registers must be adjacent"));
18149 inst
.operands
[1] = inst
.operands
[2];
18150 inst
.operands
[2] = inst
.operands
[3];
18151 inst
.operands
[0].imm
= 2;
18152 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
18153 do_vfp_nsyn_opcode ("fmsrr");
18157 /* neon_select_shape has determined that the instruction
18158 shape is wrong and has already set the error message. */
18167 do_neon_rshift_round_imm (void)
18169 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
18170 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
18171 int imm
= inst
.operands
[2].imm
;
18173 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
18176 inst
.operands
[2].present
= 0;
18181 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
18182 _("immediate out of range for shift"));
18183 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
18188 do_neon_movhf (void)
18190 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
18191 constraint (rs
!= NS_HH
, _("invalid suffix"));
18193 if (inst
.cond
!= COND_ALWAYS
)
18197 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
18198 " the behaviour is UNPREDICTABLE"));
18202 inst
.error
= BAD_COND
;
18207 do_vfp_sp_monadic ();
18210 inst
.instruction
|= 0xf0000000;
18214 do_neon_movl (void)
18216 struct neon_type_el et
= neon_check_type (2, NS_QD
,
18217 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
18218 unsigned sizebits
= et
.size
>> 3;
18219 inst
.instruction
|= sizebits
<< 19;
18220 neon_two_same (0, et
.type
== NT_unsigned
, -1);
18226 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18227 struct neon_type_el et
= neon_check_type (2, rs
,
18228 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18229 NEON_ENCODE (INTEGER
, inst
);
18230 neon_two_same (neon_quad (rs
), 1, et
.size
);
18234 do_neon_zip_uzp (void)
18236 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18237 struct neon_type_el et
= neon_check_type (2, rs
,
18238 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
18239 if (rs
== NS_DD
&& et
.size
== 32)
18241 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
18242 inst
.instruction
= N_MNEM_vtrn
;
18246 neon_two_same (neon_quad (rs
), 1, et
.size
);
18250 do_neon_sat_abs_neg (void)
18252 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18253 struct neon_type_el et
= neon_check_type (2, rs
,
18254 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18255 neon_two_same (neon_quad (rs
), 1, et
.size
);
18259 do_neon_pair_long (void)
18261 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18262 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
18263 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
18264 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
18265 neon_two_same (neon_quad (rs
), 1, et
.size
);
18269 do_neon_recip_est (void)
18271 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18272 struct neon_type_el et
= neon_check_type (2, rs
,
18273 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
18274 inst
.instruction
|= (et
.type
== NT_float
) << 8;
18275 neon_two_same (neon_quad (rs
), 1, et
.size
);
18281 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18282 struct neon_type_el et
= neon_check_type (2, rs
,
18283 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
18284 neon_two_same (neon_quad (rs
), 1, et
.size
);
18290 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18291 struct neon_type_el et
= neon_check_type (2, rs
,
18292 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
18293 neon_two_same (neon_quad (rs
), 1, et
.size
);
18299 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18300 struct neon_type_el et
= neon_check_type (2, rs
,
18301 N_EQK
| N_INT
, N_8
| N_KEY
);
18302 neon_two_same (neon_quad (rs
), 1, et
.size
);
18308 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
18309 neon_two_same (neon_quad (rs
), 1, -1);
18313 do_neon_tbl_tbx (void)
18315 unsigned listlenbits
;
18316 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
18318 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
18320 first_error (_("bad list length for table lookup"));
18324 listlenbits
= inst
.operands
[1].imm
- 1;
18325 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18326 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18327 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18328 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18329 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
18330 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
18331 inst
.instruction
|= listlenbits
<< 8;
18333 neon_dp_fixup (&inst
);
18337 do_neon_ldm_stm (void)
18339 /* P, U and L bits are part of bitmask. */
18340 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
18341 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
18343 if (inst
.operands
[1].issingle
)
18345 do_vfp_nsyn_ldm_stm (is_dbmode
);
18349 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
18350 _("writeback (!) must be used for VLDMDB and VSTMDB"));
18352 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
18353 _("register list must contain at least 1 and at most 16 "
18356 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
18357 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
18358 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
18359 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
18361 inst
.instruction
|= offsetbits
;
18363 do_vfp_cond_or_thumb ();
18367 do_neon_ldr_str (void)
18369 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
18371 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
18372 And is UNPREDICTABLE in thumb mode. */
18374 && inst
.operands
[1].reg
== REG_PC
18375 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
18378 inst
.error
= _("Use of PC here is UNPREDICTABLE");
18379 else if (warn_on_deprecated
)
18380 as_tsktsk (_("Use of PC here is deprecated"));
18383 if (inst
.operands
[0].issingle
)
18386 do_vfp_nsyn_opcode ("flds");
18388 do_vfp_nsyn_opcode ("fsts");
18390 /* ARMv8.2 vldr.16/vstr.16 instruction. */
18391 if (inst
.vectype
.el
[0].size
== 16)
18392 do_scalar_fp16_v82_encode ();
18397 do_vfp_nsyn_opcode ("fldd");
18399 do_vfp_nsyn_opcode ("fstd");
18404 do_t_vldr_vstr_sysreg (void)
18406 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
18407 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
18409 /* Use of PC is UNPREDICTABLE. */
18410 if (inst
.operands
[1].reg
== REG_PC
)
18411 inst
.error
= _("Use of PC here is UNPREDICTABLE");
18413 if (inst
.operands
[1].immisreg
)
18414 inst
.error
= _("instruction does not accept register index");
18416 if (!inst
.operands
[1].isreg
)
18417 inst
.error
= _("instruction does not accept PC-relative addressing");
18419 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
18420 inst
.error
= _("immediate value out of range");
18422 inst
.instruction
= 0xec000f80;
18424 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
18425 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
18426 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
18427 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
18431 do_vldr_vstr (void)
18433 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
18435 /* VLDR/VSTR (System Register). */
18438 if (!mark_feature_used (&arm_ext_v8_1m_main
))
18439 as_bad (_("Instruction not permitted on this architecture"));
18441 do_t_vldr_vstr_sysreg ();
18446 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
18447 as_bad (_("Instruction not permitted on this architecture"));
18448 do_neon_ldr_str ();
18452 /* "interleave" version also handles non-interleaving register VLD1/VST1
18456 do_neon_ld_st_interleave (void)
18458 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
18459 N_8
| N_16
| N_32
| N_64
);
18460 unsigned alignbits
= 0;
18462 /* The bits in this table go:
18463 0: register stride of one (0) or two (1)
18464 1,2: register list length, minus one (1, 2, 3, 4).
18465 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
18466 We use -1 for invalid entries. */
18467 const int typetable
[] =
18469 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
18470 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
18471 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
18472 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
18476 if (et
.type
== NT_invtype
)
18479 if (inst
.operands
[1].immisalign
)
18480 switch (inst
.operands
[1].imm
>> 8)
18482 case 64: alignbits
= 1; break;
18484 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
18485 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
18486 goto bad_alignment
;
18490 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
18491 goto bad_alignment
;
18496 first_error (_("bad alignment"));
18500 inst
.instruction
|= alignbits
<< 4;
18501 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18503 /* Bits [4:6] of the immediate in a list specifier encode register stride
18504 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
18505 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
18506 up the right value for "type" in a table based on this value and the given
18507 list style, then stick it back. */
18508 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
18509 | (((inst
.instruction
>> 8) & 3) << 3);
18511 typebits
= typetable
[idx
];
18513 constraint (typebits
== -1, _("bad list type for instruction"));
18514 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
18517 inst
.instruction
&= ~0xf00;
18518 inst
.instruction
|= typebits
<< 8;
18521 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
18522 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
18523 otherwise. The variable arguments are a list of pairs of legal (size, align)
18524 values, terminated with -1. */
18527 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
18530 int result
= FAIL
, thissize
, thisalign
;
18532 if (!inst
.operands
[1].immisalign
)
18538 va_start (ap
, do_alignment
);
18542 thissize
= va_arg (ap
, int);
18543 if (thissize
== -1)
18545 thisalign
= va_arg (ap
, int);
18547 if (size
== thissize
&& align
== thisalign
)
18550 while (result
!= SUCCESS
);
18554 if (result
== SUCCESS
)
18557 first_error (_("unsupported alignment for instruction"));
18563 do_neon_ld_st_lane (void)
18565 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18566 int align_good
, do_alignment
= 0;
18567 int logsize
= neon_logbits (et
.size
);
18568 int align
= inst
.operands
[1].imm
>> 8;
18569 int n
= (inst
.instruction
>> 8) & 3;
18570 int max_el
= 64 / et
.size
;
18572 if (et
.type
== NT_invtype
)
18575 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
18576 _("bad list length"));
18577 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
18578 _("scalar index out of range"));
18579 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
18581 _("stride of 2 unavailable when element size is 8"));
18585 case 0: /* VLD1 / VST1. */
18586 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
18588 if (align_good
== FAIL
)
18592 unsigned alignbits
= 0;
18595 case 16: alignbits
= 0x1; break;
18596 case 32: alignbits
= 0x3; break;
18599 inst
.instruction
|= alignbits
<< 4;
18603 case 1: /* VLD2 / VST2. */
18604 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
18605 16, 32, 32, 64, -1);
18606 if (align_good
== FAIL
)
18609 inst
.instruction
|= 1 << 4;
18612 case 2: /* VLD3 / VST3. */
18613 constraint (inst
.operands
[1].immisalign
,
18614 _("can't use alignment with this instruction"));
18617 case 3: /* VLD4 / VST4. */
18618 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18619 16, 64, 32, 64, 32, 128, -1);
18620 if (align_good
== FAIL
)
18624 unsigned alignbits
= 0;
18627 case 8: alignbits
= 0x1; break;
18628 case 16: alignbits
= 0x1; break;
18629 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
18632 inst
.instruction
|= alignbits
<< 4;
18639 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
18640 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18641 inst
.instruction
|= 1 << (4 + logsize
);
18643 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
18644 inst
.instruction
|= logsize
<< 10;
18647 /* Encode single n-element structure to all lanes VLD<n> instructions. */
18650 do_neon_ld_dup (void)
18652 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
18653 int align_good
, do_alignment
= 0;
18655 if (et
.type
== NT_invtype
)
18658 switch ((inst
.instruction
>> 8) & 3)
18660 case 0: /* VLD1. */
18661 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
18662 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18663 &do_alignment
, 16, 16, 32, 32, -1);
18664 if (align_good
== FAIL
)
18666 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
18669 case 2: inst
.instruction
|= 1 << 5; break;
18670 default: first_error (_("bad list length")); return;
18672 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18675 case 1: /* VLD2. */
18676 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
18677 &do_alignment
, 8, 16, 16, 32, 32, 64,
18679 if (align_good
== FAIL
)
18681 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
18682 _("bad list length"));
18683 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18684 inst
.instruction
|= 1 << 5;
18685 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18688 case 2: /* VLD3. */
18689 constraint (inst
.operands
[1].immisalign
,
18690 _("can't use alignment with this instruction"));
18691 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
18692 _("bad list length"));
18693 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18694 inst
.instruction
|= 1 << 5;
18695 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18698 case 3: /* VLD4. */
18700 int align
= inst
.operands
[1].imm
>> 8;
18701 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
18702 16, 64, 32, 64, 32, 128, -1);
18703 if (align_good
== FAIL
)
18705 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
18706 _("bad list length"));
18707 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
18708 inst
.instruction
|= 1 << 5;
18709 if (et
.size
== 32 && align
== 128)
18710 inst
.instruction
|= 0x3 << 6;
18712 inst
.instruction
|= neon_logbits (et
.size
) << 6;
18719 inst
.instruction
|= do_alignment
<< 4;
18722 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
18723 apart from bits [11:4]. */
18726 do_neon_ldx_stx (void)
18728 if (inst
.operands
[1].isreg
)
18729 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
18731 switch (NEON_LANE (inst
.operands
[0].imm
))
18733 case NEON_INTERLEAVE_LANES
:
18734 NEON_ENCODE (INTERLV
, inst
);
18735 do_neon_ld_st_interleave ();
18738 case NEON_ALL_LANES
:
18739 NEON_ENCODE (DUP
, inst
);
18740 if (inst
.instruction
== N_INV
)
18742 first_error ("only loads support such operands");
18749 NEON_ENCODE (LANE
, inst
);
18750 do_neon_ld_st_lane ();
18753 /* L bit comes from bit mask. */
18754 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18755 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18756 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
18758 if (inst
.operands
[1].postind
)
18760 int postreg
= inst
.operands
[1].imm
& 0xf;
18761 constraint (!inst
.operands
[1].immisreg
,
18762 _("post-index must be a register"));
18763 constraint (postreg
== 0xd || postreg
== 0xf,
18764 _("bad register for post-index"));
18765 inst
.instruction
|= postreg
;
18769 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
18770 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
18771 || inst
.relocs
[0].exp
.X_add_number
!= 0,
18774 if (inst
.operands
[1].writeback
)
18776 inst
.instruction
|= 0xd;
18779 inst
.instruction
|= 0xf;
18783 inst
.instruction
|= 0xf9000000;
18785 inst
.instruction
|= 0xf4000000;
18790 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
18792 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18793 D register operands. */
18794 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18795 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18798 NEON_ENCODE (FPV8
, inst
);
18800 if (rs
== NS_FFF
|| rs
== NS_HHH
)
18802 do_vfp_sp_dyadic ();
18804 /* ARMv8.2 fp16 instruction. */
18806 do_scalar_fp16_v82_encode ();
18809 do_vfp_dp_rd_rn_rm ();
18812 inst
.instruction
|= 0x100;
18814 inst
.instruction
|= 0xf0000000;
18820 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18822 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
18823 first_error (_("invalid instruction shape"));
18829 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18831 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
18834 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18837 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
18841 do_vrint_1 (enum neon_cvt_mode mode
)
18843 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
18844 struct neon_type_el et
;
18849 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18850 D register operands. */
18851 if (neon_shape_class
[rs
] == SC_DOUBLE
)
18852 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18855 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
18857 if (et
.type
!= NT_invtype
)
18859 /* VFP encodings. */
18860 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
18861 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
18862 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18864 NEON_ENCODE (FPV8
, inst
);
18865 if (rs
== NS_FF
|| rs
== NS_HH
)
18866 do_vfp_sp_monadic ();
18868 do_vfp_dp_rd_rm ();
18872 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
18873 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
18874 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
18875 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
18876 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
18877 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
18878 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
18882 inst
.instruction
|= (rs
== NS_DD
) << 8;
18883 do_vfp_cond_or_thumb ();
18885 /* ARMv8.2 fp16 vrint instruction. */
18887 do_scalar_fp16_v82_encode ();
18891 /* Neon encodings (or something broken...). */
18893 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
18895 if (et
.type
== NT_invtype
)
18898 set_pred_insn_type (OUTSIDE_PRED_INSN
);
18899 NEON_ENCODE (FLOAT
, inst
);
18901 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
18904 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18905 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18906 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18907 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18908 inst
.instruction
|= neon_quad (rs
) << 6;
18909 /* Mask off the original size bits and reencode them. */
18910 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
18911 | neon_logbits (et
.size
) << 18);
18915 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
18916 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
18917 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
18918 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
18919 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
18920 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
18921 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
18926 inst
.instruction
|= 0xfc000000;
18928 inst
.instruction
|= 0xf0000000;
18935 do_vrint_1 (neon_cvt_mode_x
);
18941 do_vrint_1 (neon_cvt_mode_z
);
18947 do_vrint_1 (neon_cvt_mode_r
);
18953 do_vrint_1 (neon_cvt_mode_a
);
18959 do_vrint_1 (neon_cvt_mode_n
);
18965 do_vrint_1 (neon_cvt_mode_p
);
18971 do_vrint_1 (neon_cvt_mode_m
);
/* Validate a scalar operand for VCMLA and pack register number and element
   index into the instruction's scalar field.  For 16-bit elements the index
   (0..1) goes in bit 4; for 32-bit elements only index 0 is legal.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
18992 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18994 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18995 _("expression too complex"));
18996 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18997 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18998 _("immediate out of range"));
19000 if (inst
.operands
[2].isscalar
)
19002 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
19003 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
19004 N_KEY
| N_F16
| N_F32
).size
;
19005 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
19007 inst
.instruction
= 0xfe000800;
19008 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19009 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19010 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
19011 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
19012 inst
.instruction
|= LOW4 (m
);
19013 inst
.instruction
|= HI1 (m
) << 5;
19014 inst
.instruction
|= neon_quad (rs
) << 6;
19015 inst
.instruction
|= rot
<< 20;
19016 inst
.instruction
|= (size
== 32) << 23;
19020 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
19021 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
19022 N_KEY
| N_F16
| N_F32
).size
;
19023 neon_three_same (neon_quad (rs
), 0, -1);
19024 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
19025 inst
.instruction
|= 0xfc200800;
19026 inst
.instruction
|= rot
<< 23;
19027 inst
.instruction
|= (size
== 32) << 20;
19034 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
19036 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
19037 _("expression too complex"));
19038 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
19039 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
19040 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
19041 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
19042 N_KEY
| N_F16
| N_F32
).size
;
19043 neon_three_same (neon_quad (rs
), 0, -1);
19044 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
19045 inst
.instruction
|= 0xfc800800;
19046 inst
.instruction
|= (rot
== 270) << 24;
19047 inst
.instruction
|= (size
== 32) << 20;
19050 /* Dot Product instructions encoding support. */
19053 do_neon_dotproduct (int unsigned_p
)
19055 enum neon_shape rs
;
19056 unsigned scalar_oprd2
= 0;
19059 if (inst
.cond
!= COND_ALWAYS
)
19060 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
19061 "is UNPREDICTABLE"));
19063 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
19066 /* Dot Product instructions are in three-same D/Q register format or the third
19067 operand can be a scalar index register. */
19068 if (inst
.operands
[2].isscalar
)
19070 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
19071 high8
= 0xfe000000;
19072 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
19076 high8
= 0xfc000000;
19077 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
19081 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
19083 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
19085 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
19086 Product instruction, so we pass 0 as the "ubit" parameter. And the
19087 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
19088 neon_three_same (neon_quad (rs
), 0, 32);
19090 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
19091 different NEON three-same encoding. */
19092 inst
.instruction
&= 0x00ffffff;
19093 inst
.instruction
|= high8
;
19094 /* Encode 'U' bit which indicates signedness. */
19095 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
19096 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
19097 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
19098 the instruction encoding. */
19099 if (inst
.operands
[2].isscalar
)
19101 inst
.instruction
&= 0xffffffd0;
19102 inst
.instruction
|= LOW4 (scalar_oprd2
);
19103 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
19123 /* Crypto v1 instructions. */
19125 do_crypto_2op_1 (unsigned elttype
, int op
)
19127 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19129 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
19135 NEON_ENCODE (INTEGER
, inst
);
19136 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
19137 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
19138 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
19139 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
19141 inst
.instruction
|= op
<< 6;
19144 inst
.instruction
|= 0xfc000000;
19146 inst
.instruction
|= 0xf0000000;
19150 do_crypto_3op_1 (int u
, int op
)
19152 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19154 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
19155 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
19160 NEON_ENCODE (INTEGER
, inst
);
19161 neon_three_same (1, u
, 8 << op
);
19167 do_crypto_2op_1 (N_8
, 0);
19173 do_crypto_2op_1 (N_8
, 1);
19179 do_crypto_2op_1 (N_8
, 2);
19185 do_crypto_2op_1 (N_8
, 3);
/* SHA1C.  */
static void
do_sha1c (void)
{
  do_crypto_3op_1 (0, 0);
}
/* SHA1P.  */
static void
do_sha1p (void)
{
  do_crypto_3op_1 (0, 1);
}
/* SHA1M.  */
static void
do_sha1m (void)
{
  do_crypto_3op_1 (0, 2);
}
/* SHA1SU0.  */
static void
do_sha1su0 (void)
{
  do_crypto_3op_1 (0, 3);
}
/* SHA256H.  */
static void
do_sha256h (void)
{
  do_crypto_3op_1 (1, 0);
}
/* SHA256H2.  */
static void
do_sha256h2 (void)
{
  do_crypto_3op_1 (1, 1);
}
/* SHA256SU1.  */
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
19233 do_crypto_2op_1 (N_32
, -1);
19239 do_crypto_2op_1 (N_32
, 0);
19243 do_sha256su0 (void)
19245 do_crypto_2op_1 (N_32
, 1);
19249 do_crc32_1 (unsigned int poly
, unsigned int sz
)
19251 unsigned int Rd
= inst
.operands
[0].reg
;
19252 unsigned int Rn
= inst
.operands
[1].reg
;
19253 unsigned int Rm
= inst
.operands
[2].reg
;
19255 set_pred_insn_type (OUTSIDE_PRED_INSN
);
19256 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
19257 inst
.instruction
|= LOW4 (Rn
) << 16;
19258 inst
.instruction
|= LOW4 (Rm
);
19259 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
19260 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
19262 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
19263 as_warn (UNPRED_REG ("r15"));
19305 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
19307 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
19308 do_vfp_sp_dp_cvt ();
19309 do_vfp_cond_or_thumb ();
19313 /* Overall per-instruction processing. */
19315 /* We need to be able to fix up arbitrary expressions in some statements.
19316 This is so that we can handle symbols that are an arbitrary distance from
19317 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
19318 which returns part of an address in a form which will be valid for
19319 a data instruction. We do this by pushing the expression into a symbol
19320 in the expr_section, and creating a fix for that. */
19323 fix_new_arm (fragS
* frag
,
19337 /* Create an absolute valued symbol, so we have something to
19338 refer to in the object file. Unfortunately for us, gas's
19339 generic expression parsing will already have folded out
19340 any use of .set foo/.type foo %function that may have
19341 been used to set type information of the target location,
19342 that's being specified symbolically. We have to presume
19343 the user knows what they are doing. */
19347 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
19349 symbol
= symbol_find_or_make (name
);
19350 S_SET_SEGMENT (symbol
, absolute_section
);
19351 symbol_set_frag (symbol
, &zero_address_frag
);
19352 S_SET_VALUE (symbol
, exp
->X_add_number
);
19353 exp
->X_op
= O_symbol
;
19354 exp
->X_add_symbol
= symbol
;
19355 exp
->X_add_number
= 0;
19361 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
19362 (enum bfd_reloc_code_real
) reloc
);
19366 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
19367 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
19371 /* Mark whether the fix is to a THUMB instruction, or an ARM
19373 new_fix
->tc_fix_data
= thumb_mode
;
19376 /* Create a frg for an instruction requiring relaxation. */
19378 output_relax_insn (void)
19384 /* The size of the instruction is unknown, so tie the debug info to the
19385 start of the instruction. */
19386 dwarf2_emit_insn (0);
19388 switch (inst
.relocs
[0].exp
.X_op
)
19391 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
19392 offset
= inst
.relocs
[0].exp
.X_add_number
;
19396 offset
= inst
.relocs
[0].exp
.X_add_number
;
19399 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
19403 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
19404 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
19405 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
19408 /* Write a 32-bit thumb instruction to buf. */
19410 put_thumb32_insn (char * buf
, unsigned long insn
)
19412 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
19413 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
19417 output_inst (const char * str
)
19423 as_bad ("%s -- `%s'", inst
.error
, str
);
19428 output_relax_insn ();
19431 if (inst
.size
== 0)
19434 to
= frag_more (inst
.size
);
19435 /* PR 9814: Record the thumb mode into the current frag so that we know
19436 what type of NOP padding to use, if necessary. We override any previous
19437 setting so that if the mode has changed then the NOPS that we use will
19438 match the encoding of the last instruction in the frag. */
19439 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
19441 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
19443 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
19444 put_thumb32_insn (to
, inst
.instruction
);
19446 else if (inst
.size
> INSN_SIZE
)
19448 gas_assert (inst
.size
== (2 * INSN_SIZE
));
19449 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
19450 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
19453 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
19456 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19458 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
19459 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
19460 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
19461 inst
.relocs
[r
].type
);
19464 dwarf2_emit_insn (inst
.size
);
19468 output_it_inst (int cond
, int mask
, char * to
)
19470 unsigned long instruction
= 0xbf00;
19473 instruction
|= mask
;
19474 instruction
|= cond
<< 4;
19478 to
= frag_more (2);
19480 dwarf2_emit_insn (2);
19484 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
19524 /* Subroutine of md_assemble, responsible for looking up the primary
19525 opcode from the mnemonic the user wrote. STR points to the
19526 beginning of the mnemonic.
19528 This is not simply a hash table lookup, because of conditional
19529 variants. Most instructions have conditional variants, which are
19530 expressed with a _conditional affix_ to the mnemonic. If we were
19531 to encode each conditional variant as a literal string in the opcode
19532 table, it would have approximately 20,000 entries.
19534 Most mnemonics take this affix as a suffix, and in unified syntax,
19535 'most' is upgraded to 'all'. However, in the divided syntax, some
19536 instructions take the affix as an infix, notably the s-variants of
19537 the arithmetic instructions. Of those instructions, all but six
19538 have the infix appear after the third character of the mnemonic.
19540 Accordingly, the algorithm for looking up primary opcodes given
19543 1. Look up the identifier in the opcode table.
19544 If we find a match, go to step U.
19546 2. Look up the last two characters of the identifier in the
19547 conditions table. If we find a match, look up the first N-2
19548 characters of the identifier in the opcode table. If we
19549 find a match, go to step CE.
19551 3. Look up the fourth and fifth characters of the identifier in
19552 the conditions table. If we find a match, extract those
19553 characters from the identifier, and look up the remaining
19554 characters in the opcode table. If we find a match, go
19559 U. Examine the tag field of the opcode structure, in case this is
19560 one of the six instructions with its conditional infix in an
19561 unusual place. If it is, the tag tells us where to find the
19562 infix; look it up in the conditions table and set inst.cond
19563 accordingly. Otherwise, this is an unconditional instruction.
19564 Again set inst.cond accordingly. Return the opcode structure.
19566 CE. Examine the tag field to make sure this is an instruction that
19567 should receive a conditional suffix. If it is not, fail.
19568 Otherwise, set inst.cond from the suffix we already looked up,
19569 and return the opcode structure.
19571 CM. Examine the tag field to make sure this is an instruction that
19572 should receive a conditional infix after the third character.
19573 If it is not, fail. Otherwise, undo the edits to the current
19574 line of input and proceed as for case CE. */
19576 static const struct asm_opcode
*
19577 opcode_lookup (char **str
)
19581 const struct asm_opcode
*opcode
;
19582 const struct asm_cond
*cond
;
19585 /* Scan up to the end of the mnemonic, which must end in white space,
19586 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
19587 for (base
= end
= *str
; *end
!= '\0'; end
++)
19588 if (*end
== ' ' || *end
== '.')
19594 /* Handle a possible width suffix and/or Neon type suffix. */
19599 /* The .w and .n suffixes are only valid if the unified syntax is in
19601 if (unified_syntax
&& end
[1] == 'w')
19603 else if (unified_syntax
&& end
[1] == 'n')
19608 inst
.vectype
.elems
= 0;
19610 *str
= end
+ offset
;
19612 if (end
[offset
] == '.')
19614 /* See if we have a Neon type suffix (possible in either unified or
19615 non-unified ARM syntax mode). */
19616 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
19619 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
19625 /* Look for unaffixed or special-case affixed mnemonic. */
19626 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19631 if (opcode
->tag
< OT_odd_infix_0
)
19633 inst
.cond
= COND_ALWAYS
;
19637 if (warn_on_deprecated
&& unified_syntax
)
19638 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19639 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
19640 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19643 inst
.cond
= cond
->value
;
19646 if (ARM_CPU_HAS_FEATURE (cpu_variant
, mve_ext
))
19648 /* Cannot have a conditional suffix on a mnemonic of less than a character.
19650 if (end
- base
< 2)
19653 cond
= (const struct asm_cond
*) hash_find_n (arm_vcond_hsh
, affix
, 1);
19654 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19656 /* If this opcode can not be vector predicated then don't accept it with a
19657 vector predication code. */
19658 if (opcode
&& !opcode
->mayBeVecPred
)
19661 if (!opcode
|| !cond
)
19663 /* Cannot have a conditional suffix on a mnemonic of less than two
19665 if (end
- base
< 3)
19668 /* Look for suffixed mnemonic. */
19670 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19671 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19675 if (opcode
&& cond
)
19678 switch (opcode
->tag
)
19680 case OT_cinfix3_legacy
:
19681 /* Ignore conditional suffixes matched on infix only mnemonics. */
19685 case OT_cinfix3_deprecated
:
19686 case OT_odd_infix_unc
:
19687 if (!unified_syntax
)
19689 /* Fall through. */
19693 case OT_csuf_or_in3
:
19694 inst
.cond
= cond
->value
;
19697 case OT_unconditional
:
19698 case OT_unconditionalF
:
19700 inst
.cond
= cond
->value
;
19703 /* Delayed diagnostic. */
19704 inst
.error
= BAD_COND
;
19705 inst
.cond
= COND_ALWAYS
;
19714 /* Cannot have a usual-position infix on a mnemonic of less than
19715 six characters (five would be a suffix). */
19716 if (end
- base
< 6)
19719 /* Look for infixed mnemonic in the usual position. */
19721 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
19725 memcpy (save
, affix
, 2);
19726 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
19727 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
19729 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
19730 memcpy (affix
, save
, 2);
19733 && (opcode
->tag
== OT_cinfix3
19734 || opcode
->tag
== OT_cinfix3_deprecated
19735 || opcode
->tag
== OT_csuf_or_in3
19736 || opcode
->tag
== OT_cinfix3_legacy
))
19739 if (warn_on_deprecated
&& unified_syntax
19740 && (opcode
->tag
== OT_cinfix3
19741 || opcode
->tag
== OT_cinfix3_deprecated
))
19742 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
19744 inst
.cond
= cond
->value
;
19751 /* This function generates an initial IT instruction, leaving its block
19752 virtually open for the new instructions. Eventually,
19753 the mask will be updated by now_pred_add_mask () each time
19754 a new instruction needs to be included in the IT block.
19755 Finally, the block is closed with close_automatic_it_block ().
19756 The block closure can be requested either from md_assemble (),
19757 a tencode (), or due to a label hook. */
19760 new_automatic_it_block (int cond
)
19762 now_pred
.state
= AUTOMATIC_PRED_BLOCK
;
19763 now_pred
.mask
= 0x18;
19764 now_pred
.cc
= cond
;
19765 now_pred
.block_length
= 1;
19766 mapping_state (MAP_THUMB
);
19767 now_pred
.insn
= output_it_inst (cond
, now_pred
.mask
, NULL
);
19768 now_pred
.warn_deprecated
= FALSE
;
19769 now_pred
.insn_cond
= TRUE
;
19772 /* Close an automatic IT block.
19773 See comments in new_automatic_it_block (). */
19776 close_automatic_it_block (void)
19778 now_pred
.mask
= 0x10;
19779 now_pred
.block_length
= 0;
19782 /* Update the mask of the current automatically-generated IT
19783 instruction. See comments in new_automatic_it_block (). */
19786 now_pred_add_mask (int cond
)
19788 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
19789 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
19790 | ((bitvalue) << (nbit)))
19791 const int resulting_bit
= (cond
& 1);
19793 now_pred
.mask
&= 0xf;
19794 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19796 (5 - now_pred
.block_length
));
19797 now_pred
.mask
= SET_BIT_VALUE (now_pred
.mask
,
19799 ((5 - now_pred
.block_length
) - 1));
19800 output_it_inst (now_pred
.cc
, now_pred
.mask
, now_pred
.insn
);
19803 #undef SET_BIT_VALUE
19806 /* The IT blocks handling machinery is accessed through the these functions:
19807 it_fsm_pre_encode () from md_assemble ()
19808 set_pred_insn_type () optional, from the tencode functions
19809 set_pred_insn_type_last () ditto
19810 in_pred_block () ditto
19811 it_fsm_post_encode () from md_assemble ()
19812 force_automatic_it_block_close () from label handling functions
19815 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
19816 initializing the IT insn type with a generic initial value depending
19817 on the inst.condition.
19818 2) During the tencode function, two things may happen:
19819 a) The tencode function overrides the IT insn type by
19820 calling either set_pred_insn_type (type) or
19821 set_pred_insn_type_last ().
19822 b) The tencode function queries the IT block state by
19823 calling in_pred_block () (i.e. to determine narrow/not narrow mode).
19825 Both set_pred_insn_type and in_pred_block run the internal FSM state
19826 handling function (handle_pred_state), because: a) setting the IT insn
19827 type may incur in an invalid state (exiting the function),
19828 and b) querying the state requires the FSM to be updated.
19829 Specifically we want to avoid creating an IT block for conditional
19830 branches, so it_fsm_pre_encode is actually a guess and we can't
19831 determine whether an IT block is required until the tencode () routine
19832 has decided what type of instruction this actually is.
19833 Because of this, if set_pred_insn_type and in_pred_block have to be
19834 used, set_pred_insn_type has to be called first.
19836 set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
19837 that determines the insn IT type depending on the inst.cond code.
19838 When a tencode () routine encodes an instruction that can be
19839 either outside an IT block, or, in the case of being inside, has to be
19840 the last one, set_pred_insn_type_last () will determine the proper
19841 IT instruction type based on the inst.cond code. Otherwise,
19842 set_pred_insn_type can be called for overriding that logic or
19843 for covering other cases.
19845 Calling handle_pred_state () may not transition the IT block state to
19846 OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
19847 still queried. Instead, if the FSM determines that the state should
19848 be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
19849 after the tencode () function: that's what it_fsm_post_encode () does.
19851 Since in_pred_block () calls the state handling function to get an
19852 updated state, an error may occur (due to invalid insns combination).
19853 In that case, inst.error is set.
19854 Therefore, inst.error has to be checked after the execution of
19855 the tencode () routine.
19857 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
19858 any pending state change (if any) that didn't take place in
19859 handle_pred_state () as explained above. */
19862 it_fsm_pre_encode (void)
19864 if (inst
.cond
!= COND_ALWAYS
)
19865 inst
.pred_insn_type
= INSIDE_IT_INSN
;
19867 inst
.pred_insn_type
= OUTSIDE_PRED_INSN
;
19869 now_pred
.state_handled
= 0;
19872 /* IT state FSM handling function. */
19873 /* MVE instructions and non-MVE instructions are handled differently because of
19874 the introduction of VPT blocks.
19875 Specifications say that any non-MVE instruction inside a VPT block is
19876 UNPREDICTABLE, with the exception of the BKPT instruction. Whereas most MVE
19877 instructions are deemed to be UNPREDICTABLE if inside an IT block. For the
19878 few exceptions we have MVE_UNPREDICABLE_INSN.
19879 The error messages provided depending on the different combinations possible
19880 are described in the cases below:
19881 For 'most' MVE instructions:
19882 1) In an IT block, with an IT code: syntax error
19883 2) In an IT block, with a VPT code: error: must be in a VPT block
19884 3) In an IT block, with no code: warning: UNPREDICTABLE
19885 4) In a VPT block, with an IT code: syntax error
19886 5) In a VPT block, with a VPT code: OK!
19887 6) In a VPT block, with no code: error: missing code
19888 7) Outside a pred block, with an IT code: error: syntax error
19889 8) Outside a pred block, with a VPT code: error: should be in a VPT block
19890 9) Outside a pred block, with no code: OK!
19891 For non-MVE instructions:
19892 10) In an IT block, with an IT code: OK!
19893 11) In an IT block, with a VPT code: syntax error
19894 12) In an IT block, with no code: error: missing code
19895 13) In a VPT block, with an IT code: error: should be in an IT block
19896 14) In a VPT block, with a VPT code: syntax error
19897 15) In a VPT block, with no code: UNPREDICTABLE
19898 16) Outside a pred block, with an IT code: error: should be in an IT block
19899 17) Outside a pred block, with a VPT code: syntax error
19900 18) Outside a pred block, with no code: OK!
19905 handle_pred_state (void)
19907 now_pred
.state_handled
= 1;
19908 now_pred
.insn_cond
= FALSE
;
19910 switch (now_pred
.state
)
19912 case OUTSIDE_PRED_BLOCK
:
19913 switch (inst
.pred_insn_type
)
19915 case MVE_UNPREDICABLE_INSN
:
19916 case MVE_OUTSIDE_PRED_INSN
:
19917 if (inst
.cond
< COND_ALWAYS
)
19919 /* Case 7: Outside a pred block, with an IT code: error: syntax
19921 inst
.error
= BAD_SYNTAX
;
19924 /* Case 9: Outside a pred block, with no code: OK! */
19926 case OUTSIDE_PRED_INSN
:
19927 if (inst
.cond
> COND_ALWAYS
)
19929 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19931 inst
.error
= BAD_SYNTAX
;
19934 /* Case 18: Outside a pred block, with no code: OK! */
19937 case INSIDE_VPT_INSN
:
19938 /* Case 8: Outside a pred block, with a VPT code: error: should be in
19940 inst
.error
= BAD_OUT_VPT
;
19943 case INSIDE_IT_INSN
:
19944 case INSIDE_IT_LAST_INSN
:
19945 if (inst
.cond
< COND_ALWAYS
)
19947 /* Case 16: Outside a pred block, with an IT code: error: should
19948 be in an IT block. */
19949 if (thumb_mode
== 0)
19952 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
19953 as_tsktsk (_("Warning: conditional outside an IT block"\
19958 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
19959 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
19961 /* Automatically generate the IT instruction. */
19962 new_automatic_it_block (inst
.cond
);
19963 if (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
)
19964 close_automatic_it_block ();
19968 inst
.error
= BAD_OUT_IT
;
19974 else if (inst
.cond
> COND_ALWAYS
)
19976 /* Case 17: Outside a pred block, with a VPT code: syntax error.
19978 inst
.error
= BAD_SYNTAX
;
19983 case IF_INSIDE_IT_LAST_INSN
:
19984 case NEUTRAL_IT_INSN
:
19988 if (inst
.cond
!= COND_ALWAYS
)
19989 first_error (BAD_SYNTAX
);
19990 now_pred
.state
= MANUAL_PRED_BLOCK
;
19991 now_pred
.block_length
= 0;
19992 now_pred
.type
= VECTOR_PRED
;
19996 now_pred
.state
= MANUAL_PRED_BLOCK
;
19997 now_pred
.block_length
= 0;
19998 now_pred
.type
= SCALAR_PRED
;
20003 case AUTOMATIC_PRED_BLOCK
:
20004 /* Three things may happen now:
20005 a) We should increment current it block size;
20006 b) We should close current it block (closing insn or 4 insns);
20007 c) We should close current it block and start a new one (due
20008 to incompatible conditions or
20009 4 insns-length block reached). */
20011 switch (inst
.pred_insn_type
)
20013 case INSIDE_VPT_INSN
:
20015 case MVE_UNPREDICABLE_INSN
:
20016 case MVE_OUTSIDE_PRED_INSN
:
20018 case OUTSIDE_PRED_INSN
:
20019 /* The closure of the block shall happen immediately,
20020 so any in_pred_block () call reports the block as closed. */
20021 force_automatic_it_block_close ();
20024 case INSIDE_IT_INSN
:
20025 case INSIDE_IT_LAST_INSN
:
20026 case IF_INSIDE_IT_LAST_INSN
:
20027 now_pred
.block_length
++;
20029 if (now_pred
.block_length
> 4
20030 || !now_pred_compatible (inst
.cond
))
20032 force_automatic_it_block_close ();
20033 if (inst
.pred_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
20034 new_automatic_it_block (inst
.cond
);
20038 now_pred
.insn_cond
= TRUE
;
20039 now_pred_add_mask (inst
.cond
);
20042 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
20043 && (inst
.pred_insn_type
== INSIDE_IT_LAST_INSN
20044 || inst
.pred_insn_type
== IF_INSIDE_IT_LAST_INSN
))
20045 close_automatic_it_block ();
20048 case NEUTRAL_IT_INSN
:
20049 now_pred
.block_length
++;
20050 now_pred
.insn_cond
= TRUE
;
20052 if (now_pred
.block_length
> 4)
20053 force_automatic_it_block_close ();
20055 now_pred_add_mask (now_pred
.cc
& 1);
20059 close_automatic_it_block ();
20060 now_pred
.state
= MANUAL_PRED_BLOCK
;
20065 case MANUAL_PRED_BLOCK
:
20068 if (now_pred
.type
== SCALAR_PRED
)
20070 /* Check conditional suffixes. */
20071 cond
= now_pred
.cc
^ ((now_pred
.mask
>> 4) & 1) ^ 1;
20072 now_pred
.mask
<<= 1;
20073 now_pred
.mask
&= 0x1f;
20074 is_last
= (now_pred
.mask
== 0x10);
20078 now_pred
.cc
^= (now_pred
.mask
>> 4);
20079 cond
= now_pred
.cc
+ 0xf;
20080 now_pred
.mask
<<= 1;
20081 now_pred
.mask
&= 0x1f;
20082 is_last
= now_pred
.mask
== 0x10;
20084 now_pred
.insn_cond
= TRUE
;
20086 switch (inst
.pred_insn_type
)
20088 case OUTSIDE_PRED_INSN
:
20089 if (now_pred
.type
== SCALAR_PRED
)
20091 if (inst
.cond
== COND_ALWAYS
)
20093 /* Case 12: In an IT block, with no code: error: missing
20095 inst
.error
= BAD_NOT_IT
;
20098 else if (inst
.cond
> COND_ALWAYS
)
20100 /* Case 11: In an IT block, with a VPT code: syntax error.
20102 inst
.error
= BAD_SYNTAX
;
20105 else if (thumb_mode
)
20107 /* This is for some special cases where a non-MVE
20108 instruction is not allowed in an IT block, such as cbz,
20109 but are put into one with a condition code.
20110 You could argue this should be a syntax error, but we
20111 gave the 'not allowed in IT block' diagnostic in the
20112 past so we will keep doing so. */
20113 inst
.error
= BAD_NOT_IT
;
20120 /* Case 15: In a VPT block, with no code: UNPREDICTABLE. */
20121 as_tsktsk (MVE_NOT_VPT
);
20124 case MVE_OUTSIDE_PRED_INSN
:
20125 if (now_pred
.type
== SCALAR_PRED
)
20127 if (inst
.cond
== COND_ALWAYS
)
20129 /* Case 3: In an IT block, with no code: warning:
20131 as_tsktsk (MVE_NOT_IT
);
20134 else if (inst
.cond
< COND_ALWAYS
)
20136 /* Case 1: In an IT block, with an IT code: syntax error.
20138 inst
.error
= BAD_SYNTAX
;
20146 if (inst
.cond
< COND_ALWAYS
)
20148 /* Case 4: In a VPT block, with an IT code: syntax error.
20150 inst
.error
= BAD_SYNTAX
;
20153 else if (inst
.cond
== COND_ALWAYS
)
20155 /* Case 6: In a VPT block, with no code: error: missing
20157 inst
.error
= BAD_NOT_VPT
;
20165 case MVE_UNPREDICABLE_INSN
:
20166 as_tsktsk (now_pred
.type
== SCALAR_PRED
? MVE_NOT_IT
: MVE_NOT_VPT
);
20168 case INSIDE_IT_INSN
:
20169 if (inst
.cond
> COND_ALWAYS
)
20171 /* Case 11: In an IT block, with a VPT code: syntax error. */
20172 /* Case 14: In a VPT block, with a VPT code: syntax error. */
20173 inst
.error
= BAD_SYNTAX
;
20176 else if (now_pred
.type
== SCALAR_PRED
)
20178 /* Case 10: In an IT block, with an IT code: OK! */
20179 if (cond
!= inst
.cond
)
20181 inst
.error
= now_pred
.type
== SCALAR_PRED
? BAD_IT_COND
:
20188 /* Case 13: In a VPT block, with an IT code: error: should be
20190 inst
.error
= BAD_OUT_IT
;
20195 case INSIDE_VPT_INSN
:
20196 if (now_pred
.type
== SCALAR_PRED
)
20198 /* Case 2: In an IT block, with a VPT code: error: must be in a
20200 inst
.error
= BAD_OUT_VPT
;
20203 /* Case 5: In a VPT block, with a VPT code: OK! */
20204 else if (cond
!= inst
.cond
)
20206 inst
.error
= BAD_VPT_COND
;
20210 case INSIDE_IT_LAST_INSN
:
20211 case IF_INSIDE_IT_LAST_INSN
:
20212 if (now_pred
.type
== VECTOR_PRED
|| inst
.cond
> COND_ALWAYS
)
20214 /* Case 4: In a VPT block, with an IT code: syntax error. */
20215 /* Case 11: In an IT block, with a VPT code: syntax error. */
20216 inst
.error
= BAD_SYNTAX
;
20219 else if (cond
!= inst
.cond
)
20221 inst
.error
= BAD_IT_COND
;
20226 inst
.error
= BAD_BRANCH
;
20231 case NEUTRAL_IT_INSN
:
20232 /* The BKPT instruction is unconditional even in a IT or VPT
20237 if (now_pred
.type
== SCALAR_PRED
)
20239 inst
.error
= BAD_IT_IT
;
20242 /* fall through. */
20244 if (inst
.cond
== COND_ALWAYS
)
20246 /* Executing a VPT/VPST instruction inside an IT block or a
20247 VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
20249 if (now_pred
.type
== SCALAR_PRED
)
20250 as_tsktsk (MVE_NOT_IT
);
20252 as_tsktsk (MVE_NOT_VPT
);
20257 /* VPT/VPST do not accept condition codes. */
20258 inst
.error
= BAD_SYNTAX
;
20269 struct depr_insn_mask
20271 unsigned long pattern
;
20272 unsigned long mask
;
20273 const char* description
;
20276 /* List of 16-bit instruction patterns deprecated in an IT block in
20278 static const struct depr_insn_mask depr_it_insns
[] = {
20279 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
20280 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
20281 { 0xa000, 0xb800, N_("ADR") },
20282 { 0x4800, 0xf800, N_("Literal loads") },
20283 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
20284 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
20285 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
20286 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
20287 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
20292 it_fsm_post_encode (void)
20296 if (!now_pred
.state_handled
)
20297 handle_pred_state ();
20299 if (now_pred
.insn_cond
20300 && !now_pred
.warn_deprecated
20301 && warn_on_deprecated
20302 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
20303 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
20305 if (inst
.instruction
>= 0x10000)
20307 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
20308 "performance deprecated in ARMv8-A and ARMv8-R"));
20309 now_pred
.warn_deprecated
= TRUE
;
20313 const struct depr_insn_mask
*p
= depr_it_insns
;
20315 while (p
->mask
!= 0)
20317 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
20319 as_tsktsk (_("IT blocks containing 16-bit Thumb "
20320 "instructions of the following class are "
20321 "performance deprecated in ARMv8-A and "
20322 "ARMv8-R: %s"), p
->description
);
20323 now_pred
.warn_deprecated
= TRUE
;
20331 if (now_pred
.block_length
> 1)
20333 as_tsktsk (_("IT blocks containing more than one conditional "
20334 "instruction are performance deprecated in ARMv8-A and "
20336 now_pred
.warn_deprecated
= TRUE
;
20340 is_last
= (now_pred
.mask
== 0x10);
20343 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
20349 force_automatic_it_block_close (void)
20351 if (now_pred
.state
== AUTOMATIC_PRED_BLOCK
)
20353 close_automatic_it_block ();
20354 now_pred
.state
= OUTSIDE_PRED_BLOCK
;
20360 in_pred_block (void)
20362 if (!now_pred
.state_handled
)
20363 handle_pred_state ();
20365 return now_pred
.state
!= OUTSIDE_PRED_BLOCK
;
20368 /* Whether OPCODE only has T32 encoding. Since this function is only used by
20369 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
20370 here, hence the "known" in the function name. */
20373 known_t32_only_insn (const struct asm_opcode
*opcode
)
20375 /* Original Thumb-1 wide instruction. */
20376 if (opcode
->tencode
== do_t_blx
20377 || opcode
->tencode
== do_t_branch23
20378 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
20379 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
20382 /* Wide-only instruction added to ARMv8-M Baseline. */
20383 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
20384 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
20385 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
20386 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
20392 /* Whether wide instruction variant can be used if available for a valid OPCODE
20396 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
20398 if (known_t32_only_insn (opcode
))
20401 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
20402 of variant T3 of B.W is checked in do_t_branch. */
20403 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
20404 && opcode
->tencode
== do_t_branch
)
20407 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
20408 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
20409 && opcode
->tencode
== do_t_mov_cmp
20410 /* Make sure CMP instruction is not affected. */
20411 && opcode
->aencode
== do_mov
)
20414 /* Wide instruction variants of all instructions with narrow *and* wide
20415 variants become available with ARMv6t2. Other opcodes are either
20416 narrow-only or wide-only and are thus available if OPCODE is valid. */
20417 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
20420 /* OPCODE with narrow only instruction variant or wide variant not
20426 md_assemble (char *str
)
20429 const struct asm_opcode
* opcode
;
20431 /* Align the previous label if needed. */
20432 if (last_label_seen
!= NULL
)
20434 symbol_set_frag (last_label_seen
, frag_now
);
20435 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
20436 S_SET_SEGMENT (last_label_seen
, now_seg
);
20439 memset (&inst
, '\0', sizeof (inst
));
20441 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
20442 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
20444 opcode
= opcode_lookup (&p
);
20447 /* It wasn't an instruction, but it might be a register alias of
20448 the form alias .req reg, or a Neon .dn/.qn directive. */
20449 if (! create_register_alias (str
, p
)
20450 && ! create_neon_reg_alias (str
, p
))
20451 as_bad (_("bad instruction `%s'"), str
);
20456 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
20457 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
20459 /* The value which unconditional instructions should have in place of the
20460 condition field. */
20461 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
20465 arm_feature_set variant
;
20467 variant
= cpu_variant
;
20468 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
20469 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
20470 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
20471 /* Check that this instruction is supported for this CPU. */
20472 if (!opcode
->tvariant
20473 || (thumb_mode
== 1
20474 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
20476 if (opcode
->tencode
== do_t_swi
)
20477 as_bad (_("SVC is not permitted on this architecture"));
20479 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
20482 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
20483 && opcode
->tencode
!= do_t_branch
)
20485 as_bad (_("Thumb does not support conditional execution"));
20489 /* Two things are addressed here:
20490 1) Implicit require narrow instructions on Thumb-1.
20491 This avoids relaxation accidentally introducing Thumb-2
20493 2) Reject wide instructions in non Thumb-2 cores.
20495 Only instructions with narrow and wide variants need to be handled
20496 but selecting all non wide-only instructions is easier. */
20497 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
20498 && !t32_insn_ok (variant
, opcode
))
20500 if (inst
.size_req
== 0)
20502 else if (inst
.size_req
== 4)
20504 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
20505 as_bad (_("selected processor does not support 32bit wide "
20506 "variant of instruction `%s'"), str
);
20508 as_bad (_("selected processor does not support `%s' in "
20509 "Thumb-2 mode"), str
);
20514 inst
.instruction
= opcode
->tvalue
;
20516 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
20518 /* Prepare the pred_insn_type for those encodings that don't set
20520 it_fsm_pre_encode ();
20522 opcode
->tencode ();
20524 it_fsm_post_encode ();
20527 if (!(inst
.error
|| inst
.relax
))
20529 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
20530 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
20531 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
20533 as_bad (_("cannot honor width suffix -- `%s'"), str
);
20538 /* Something has gone badly wrong if we try to relax a fixed size
20540 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
20542 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
20543 *opcode
->tvariant
);
20544 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
20545 set those bits when Thumb-2 32-bit instructions are seen. The impact
20546 of relaxable instructions will be considered later after we finish all
20548 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
20549 variant
= arm_arch_none
;
20551 variant
= cpu_variant
;
20552 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
20553 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
20556 check_neon_suffixes
;
20560 mapping_state (MAP_THUMB
);
20563 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
20567 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
20568 is_bx
= (opcode
->aencode
== do_bx
);
20570 /* Check that this instruction is supported for this CPU. */
20571 if (!(is_bx
&& fix_v4bx
)
20572 && !(opcode
->avariant
&&
20573 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
20575 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
20580 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
20584 inst
.instruction
= opcode
->avalue
;
20585 if (opcode
->tag
== OT_unconditionalF
)
20586 inst
.instruction
|= 0xFU
<< 28;
20588 inst
.instruction
|= inst
.cond
<< 28;
20589 inst
.size
= INSN_SIZE
;
20590 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
20592 it_fsm_pre_encode ();
20593 opcode
->aencode ();
20594 it_fsm_post_encode ();
20596 /* Arm mode bx is marked as both v4T and v5 because it's still required
20597 on a hypothetical non-thumb v5 core. */
20599 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
20601 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
20602 *opcode
->avariant
);
20604 check_neon_suffixes
;
20608 mapping_state (MAP_ARM
);
20613 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
20621 check_pred_blocks_finished (void)
20626 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
20627 if (seg_info (sect
)->tc_segment_info_data
.current_pred
.state
20628 == MANUAL_PRED_BLOCK
)
20630 if (now_pred
.type
== SCALAR_PRED
)
20631 as_warn (_("section '%s' finished with an open IT block."),
20634 as_warn (_("section '%s' finished with an open VPT/VPST block."),
20638 if (now_pred
.state
== MANUAL_PRED_BLOCK
)
20640 if (now_pred
.type
== SCALAR_PRED
)
20641 as_warn (_("file finished with an open IT block."));
20643 as_warn (_("file finished with an open VPT/VPST block."));
20648 /* Various frobbings of labels and their addresses. */
20651 arm_start_line_hook (void)
20653 last_label_seen
= NULL
;
20657 arm_frob_label (symbolS
* sym
)
20659 last_label_seen
= sym
;
20661 ARM_SET_THUMB (sym
, thumb_mode
);
20663 #if defined OBJ_COFF || defined OBJ_ELF
20664 ARM_SET_INTERWORK (sym
, support_interwork
);
20667 force_automatic_it_block_close ();
20669 /* Note - do not allow local symbols (.Lxxx) to be labelled
20670 as Thumb functions. This is because these labels, whilst
20671 they exist inside Thumb code, are not the entry points for
20672 possible ARM->Thumb calls. Also, these labels can be used
20673 as part of a computed goto or switch statement. eg gcc
20674 can generate code that looks like this:
20676 ldr r2, [pc, .Laaa]
20686 The first instruction loads the address of the jump table.
20687 The second instruction converts a table index into a byte offset.
20688 The third instruction gets the jump address out of the table.
20689 The fourth instruction performs the jump.
20691 If the address stored at .Laaa is that of a symbol which has the
20692 Thumb_Func bit set, then the linker will arrange for this address
20693 to have the bottom bit set, which in turn would mean that the
20694 address computation performed by the third instruction would end
20695 up with the bottom bit set. Since the ARM is capable of unaligned
20696 word loads, the instruction would then load the incorrect address
20697 out of the jump table, and chaos would ensue. */
20698 if (label_is_thumb_function_name
20699 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
20700 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
20702 /* When the address of a Thumb function is taken the bottom
20703 bit of that address should be set. This will allow
20704 interworking between Arm and Thumb functions to work
20707 THUMB_SET_FUNC (sym
, 1);
20709 label_is_thumb_function_name
= FALSE
;
20712 dwarf2_emit_label (sym
);
20716 arm_data_in_code (void)
20718 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
20720 *input_line_pointer
= '/';
20721 input_line_pointer
+= 5;
20722 *input_line_pointer
= 0;
20730 arm_canonicalize_symbol_name (char * name
)
20734 if (thumb_mode
&& (len
= strlen (name
)) > 5
20735 && streq (name
+ len
- 5, "/data"))
20736 *(name
+ len
- 5) = 0;
20741 /* Table of all register names defined by default. The user can
20742 define additional names with .req. Note that all register names
20743 should appear in both upper and lowercase variants. Some registers
20744 also have mixed-case names. */
20746 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
20747 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
20748 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
20749 #define REGSET(p,t) \
20750 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
20751 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
20752 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
20753 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
20754 #define REGSETH(p,t) \
20755 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
20756 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
20757 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
20758 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
20759 #define REGSET2(p,t) \
20760 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
20761 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
20762 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
20763 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
20764 #define SPLRBANK(base,bank,t) \
20765 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
20766 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
20767 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
20768 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
20769 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
20770 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
20772 static const struct reg_entry reg_names
[] =
20774 /* ARM integer registers. */
20775 REGSET(r
, RN
), REGSET(R
, RN
),
20777 /* ATPCS synonyms. */
20778 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
20779 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
20780 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
20782 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
20783 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
20784 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
20786 /* Well-known aliases. */
20787 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
20788 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
20790 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
20791 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
20793 /* Coprocessor numbers. */
20794 REGSET(p
, CP
), REGSET(P
, CP
),
20796 /* Coprocessor register numbers. The "cr" variants are for backward
20798 REGSET(c
, CN
), REGSET(C
, CN
),
20799 REGSET(cr
, CN
), REGSET(CR
, CN
),
20801 /* ARM banked registers. */
20802 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
20803 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
20804 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
20805 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
20806 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
20807 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
20808 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
20810 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
20811 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
20812 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
20813 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
20814 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
20815 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
20816 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
20817 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
20819 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
20820 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
20821 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
20822 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
20823 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
20824 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
20825 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
20826 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20827 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
20829 /* FPA registers. */
20830 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
20831 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
20833 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
20834 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
20836 /* VFP SP registers. */
20837 REGSET(s
,VFS
), REGSET(S
,VFS
),
20838 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
20840 /* VFP DP Registers. */
20841 REGSET(d
,VFD
), REGSET(D
,VFD
),
20842 /* Extra Neon DP registers. */
20843 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
20845 /* Neon QP registers. */
20846 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
20848 /* VFP control registers. */
20849 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
20850 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
20851 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
20852 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
20853 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
20854 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
20855 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
20857 /* Maverick DSP coprocessor registers. */
20858 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
20859 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
20861 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
20862 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
20863 REGDEF(dspsc
,0,DSPSC
),
20865 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
20866 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
20867 REGDEF(DSPSC
,0,DSPSC
),
20869 /* iWMMXt data registers - p0, c0-15. */
20870 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
20872 /* iWMMXt control registers - p1, c0-3. */
20873 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
20874 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
20875 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
20876 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
20878 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
20879 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
20880 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
20881 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
20882 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
20884 /* XScale accumulator registers. */
20885 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
20891 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
20892 within psr_required_here. */
20893 static const struct asm_psr psrs
[] =
20895 /* Backward compatibility notation. Note that "all" is no longer
20896 truly all possible PSR bits. */
20897 {"all", PSR_c
| PSR_f
},
20901 /* Individual flags. */
20907 /* Combinations of flags. */
20908 {"fs", PSR_f
| PSR_s
},
20909 {"fx", PSR_f
| PSR_x
},
20910 {"fc", PSR_f
| PSR_c
},
20911 {"sf", PSR_s
| PSR_f
},
20912 {"sx", PSR_s
| PSR_x
},
20913 {"sc", PSR_s
| PSR_c
},
20914 {"xf", PSR_x
| PSR_f
},
20915 {"xs", PSR_x
| PSR_s
},
20916 {"xc", PSR_x
| PSR_c
},
20917 {"cf", PSR_c
| PSR_f
},
20918 {"cs", PSR_c
| PSR_s
},
20919 {"cx", PSR_c
| PSR_x
},
20920 {"fsx", PSR_f
| PSR_s
| PSR_x
},
20921 {"fsc", PSR_f
| PSR_s
| PSR_c
},
20922 {"fxs", PSR_f
| PSR_x
| PSR_s
},
20923 {"fxc", PSR_f
| PSR_x
| PSR_c
},
20924 {"fcs", PSR_f
| PSR_c
| PSR_s
},
20925 {"fcx", PSR_f
| PSR_c
| PSR_x
},
20926 {"sfx", PSR_s
| PSR_f
| PSR_x
},
20927 {"sfc", PSR_s
| PSR_f
| PSR_c
},
20928 {"sxf", PSR_s
| PSR_x
| PSR_f
},
20929 {"sxc", PSR_s
| PSR_x
| PSR_c
},
20930 {"scf", PSR_s
| PSR_c
| PSR_f
},
20931 {"scx", PSR_s
| PSR_c
| PSR_x
},
20932 {"xfs", PSR_x
| PSR_f
| PSR_s
},
20933 {"xfc", PSR_x
| PSR_f
| PSR_c
},
20934 {"xsf", PSR_x
| PSR_s
| PSR_f
},
20935 {"xsc", PSR_x
| PSR_s
| PSR_c
},
20936 {"xcf", PSR_x
| PSR_c
| PSR_f
},
20937 {"xcs", PSR_x
| PSR_c
| PSR_s
},
20938 {"cfs", PSR_c
| PSR_f
| PSR_s
},
20939 {"cfx", PSR_c
| PSR_f
| PSR_x
},
20940 {"csf", PSR_c
| PSR_s
| PSR_f
},
20941 {"csx", PSR_c
| PSR_s
| PSR_x
},
20942 {"cxf", PSR_c
| PSR_x
| PSR_f
},
20943 {"cxs", PSR_c
| PSR_x
| PSR_s
},
20944 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
20945 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
20946 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
20947 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
20948 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
20949 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
20950 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
20951 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
20952 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
20953 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
20954 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
20955 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
20956 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
20957 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
20958 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
20959 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
20960 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
20961 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
20962 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
20963 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
20964 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
20965 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
20966 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
20967 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
20970 /* Table of V7M psr names. */
20971 static const struct asm_psr v7m_psrs
[] =
20973 {"apsr", 0x0 }, {"APSR", 0x0 },
20974 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
20975 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
20976 {"psr", 0x3 }, {"PSR", 0x3 },
20977 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
20978 {"ipsr", 0x5 }, {"IPSR", 0x5 },
20979 {"epsr", 0x6 }, {"EPSR", 0x6 },
20980 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
20981 {"msp", 0x8 }, {"MSP", 0x8 },
20982 {"psp", 0x9 }, {"PSP", 0x9 },
20983 {"msplim", 0xa }, {"MSPLIM", 0xa },
20984 {"psplim", 0xb }, {"PSPLIM", 0xb },
20985 {"primask", 0x10}, {"PRIMASK", 0x10},
20986 {"basepri", 0x11}, {"BASEPRI", 0x11},
20987 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
20988 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
20989 {"control", 0x14}, {"CONTROL", 0x14},
20990 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
20991 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
20992 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
20993 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
20994 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
20995 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
20996 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
20997 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
20998 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
21001 /* Table of all shift-in-operand names. */
21002 static const struct asm_shift_name shift_names
[] =
21004 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
21005 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
21006 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
21007 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
21008 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
21009 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
},
21010 { "uxtw", SHIFT_UXTW
}, { "UXTW", SHIFT_UXTW
}
21013 /* Table of all explicit relocation names. */
21015 static struct reloc_entry reloc_names
[] =
21017 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
21018 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
21019 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
21020 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
21021 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
21022 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
21023 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
21024 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
21025 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
21026 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
21027 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
21028 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
21029 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
21030 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
21031 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
21032 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
21033 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
21034 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
21035 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
21036 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
21037 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
21038 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
21039 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
21040 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
21041 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
21042 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
21043 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
21047 /* Table of all conditional affixes. */
21048 static const struct asm_cond conds
[] =
21052 {"cs", 0x2}, {"hs", 0x2},
21053 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
21066 static const struct asm_cond vconds
[] =
21072 #define UL_BARRIER(L,U,CODE,FEAT) \
21073 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
21074 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
21076 static struct asm_barrier_opt barrier_opt_names
[] =
21078 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
21079 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
21080 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
21081 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
21082 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
21083 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
21084 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
21085 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
21086 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
21087 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
21088 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
21089 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
21090 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
21091 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
21092 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
21093 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
21098 /* Table of ARM-format instructions. */
/* Macros for gluing together operand strings.  N.B. In all cases other
   than OPS0, the trailing OP_stop comes from default zero-initialization
   of the unspecified elements of the array.  */
#define OPS0()			{ OP_stop, }
#define OPS1(a)			{ OP_##a, }
#define OPS2(a,b)		{ OP_##a,OP_##b, }
#define OPS3(a,b,c)		{ OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)		{ OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)		{ OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f)	{ OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn ones, but do not prepend the OP_
   prefix.  This is useful when mixing operands for ARM and THUMB, i.e.
   with the MIX_ARM_THUMB_OPERANDS macro.  To use them, prefix the
   number of operands with an underscore.  */
#define OPS_1(a)		{ a, }
#define OPS_2(a,b)		{ a,b, }
#define OPS_3(a,b,c)		{ a,b,c, }
#define OPS_4(a,b,c,d)		{ a,b,c,d, }
#define OPS_5(a,b,c,d,e)	{ a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f)	{ a,b,c,d,e,f, }
21123 /* These macros abstract out the exact format of the mnemonic table and
21124 save some repeated characters. */
21126 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
21127 #define TxCE(mnem, op, top, nops, ops, ae, te) \
21128 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
21129 THUMB_VARIANT, do_##ae, do_##te, 0 }
21131 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
21132 a T_MNEM_xyz enumerator. */
21133 #define TCE(mnem, aop, top, nops, ops, ae, te) \
21134 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
21135 #define tCE(mnem, aop, top, nops, ops, ae, te) \
21136 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
21138 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
21139 infix after the third character. */
21140 #define TxC3(mnem, op, top, nops, ops, ae, te) \
21141 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
21142 THUMB_VARIANT, do_##ae, do_##te, 0 }
21143 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
21144 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
21145 THUMB_VARIANT, do_##ae, do_##te, 0 }
21146 #define TC3(mnem, aop, top, nops, ops, ae, te) \
21147 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
21148 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
21149 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
21150 #define tC3(mnem, aop, top, nops, ops, ae, te) \
21151 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
21152 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
21153 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
21155 /* Mnemonic that cannot be conditionalized. The ARM condition-code
21156 field is still 0xE. Many of the Thumb variants can be executed
21157 conditionally, so this is checked separately. */
21158 #define TUE(mnem, op, top, nops, ops, ae, te) \
21159 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
21160 THUMB_VARIANT, do_##ae, do_##te, 0 }
21162 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
21163 Used by mnemonics that have very minimal differences in the encoding for
21164 ARM and Thumb variants and can be handled in a common function. */
21165 #define TUEc(mnem, op, top, nops, ops, en) \
21166 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
21167 THUMB_VARIANT, do_##en, do_##en, 0 }
21169 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
21170 condition code field. */
21171 #define TUF(mnem, op, top, nops, ops, ae, te) \
21172 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
21173 THUMB_VARIANT, do_##ae, do_##te, 0 }
21175 /* ARM-only variants of all the above. */
21176 #define CE(mnem, op, nops, ops, ae) \
21177 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
21179 #define C3(mnem, op, nops, ops, ae) \
21180 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
21182 /* Thumb-only variants of TCE and TUE. */
21183 #define ToC(mnem, top, nops, ops, te) \
21184 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
21187 #define ToU(mnem, top, nops, ops, te) \
21188 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
21191 /* T_MNEM_xyz enumerator variants of ToC. */
21192 #define toC(mnem, top, nops, ops, te) \
21193 { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
21196 /* T_MNEM_xyz enumerator variants of ToU. */
21197 #define toU(mnem, top, nops, ops, te) \
21198 { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
21201 /* Legacy mnemonics that always have conditional infix after the third
21203 #define CL(mnem, op, nops, ops, ae) \
21204 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
21205 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
21207 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
21208 #define cCE(mnem, op, nops, ops, ae) \
21209 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
21211 /* Legacy coprocessor instructions where conditional infix and conditional
21212 suffix are ambiguous. For consistency this includes all FPA instructions,
21213 not just the potentially ambiguous ones. */
21214 #define cCL(mnem, op, nops, ops, ae) \
21215 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
21216 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
21218 /* Coprocessor, takes either a suffix or a position-3 infix
21219 (for an FPA corner case). */
21220 #define C3E(mnem, op, nops, ops, ae) \
21221 { mnem, OPS##nops ops, OT_csuf_or_in3, \
21222 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }
21224 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
21225 { m1 #m2 m3, OPS##nops ops, \
21226 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
21227 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
21229 #define CM(m1, m2, op, nops, ops, ae) \
21230 xCM_ (m1, , m2, op, nops, ops, ae), \
21231 xCM_ (m1, eq, m2, op, nops, ops, ae), \
21232 xCM_ (m1, ne, m2, op, nops, ops, ae), \
21233 xCM_ (m1, cs, m2, op, nops, ops, ae), \
21234 xCM_ (m1, hs, m2, op, nops, ops, ae), \
21235 xCM_ (m1, cc, m2, op, nops, ops, ae), \
21236 xCM_ (m1, ul, m2, op, nops, ops, ae), \
21237 xCM_ (m1, lo, m2, op, nops, ops, ae), \
21238 xCM_ (m1, mi, m2, op, nops, ops, ae), \
21239 xCM_ (m1, pl, m2, op, nops, ops, ae), \
21240 xCM_ (m1, vs, m2, op, nops, ops, ae), \
21241 xCM_ (m1, vc, m2, op, nops, ops, ae), \
21242 xCM_ (m1, hi, m2, op, nops, ops, ae), \
21243 xCM_ (m1, ls, m2, op, nops, ops, ae), \
21244 xCM_ (m1, ge, m2, op, nops, ops, ae), \
21245 xCM_ (m1, lt, m2, op, nops, ops, ae), \
21246 xCM_ (m1, gt, m2, op, nops, ops, ae), \
21247 xCM_ (m1, le, m2, op, nops, ops, ae), \
21248 xCM_ (m1, al, m2, op, nops, ops, ae)
21250 #define UE(mnem, op, nops, ops, ae) \
21251 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
21253 #define UF(mnem, op, nops, ops, ae) \
21254 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
21256 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
21257 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
21258 use the same encoding function for each. */
21259 #define NUF(mnem, op, nops, ops, enc) \
21260 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
21261 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }
21263 /* Neon data processing, version which indirects through neon_enc_tab for
21264 the various overloaded versions of opcodes. */
21265 #define nUF(mnem, op, nops, ops, enc) \
21266 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
21267 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }
21269 /* Neon insn with conditional suffix for the ARM version, non-overloaded
21271 #define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
21272 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
21273 THUMB_VARIANT, do_##enc, do_##enc, mve_p }
21275 #define NCE(mnem, op, nops, ops, enc) \
21276 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)
21278 #define NCEF(mnem, op, nops, ops, enc) \
21279 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
21281 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
21282 #define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p) \
21283 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
21284 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }
21286 #define nCE(mnem, op, nops, ops, enc) \
21287 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)
21289 #define nCEF(mnem, op, nops, ops, enc) \
21290 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)
21293 #define mCEF(mnem, op, nops, ops, enc) \
21294 { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op, \
21295 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
21298 /* nCEF but for MVE predicated instructions. */
21299 #define mnCEF(mnem, op, nops, ops, enc) \
21300 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
21302 /* nCE but for MVE predicated instructions. */
21303 #define mnCE(mnem, op, nops, ops, enc) \
21304 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)
21306 /* NUF but for potentially MVE predicated instructions. */
21307 #define MNUF(mnem, op, nops, ops, enc) \
21308 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
21309 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
21311 /* nUF but for potentially MVE predicated instructions. */
21312 #define mnUF(mnem, op, nops, ops, enc) \
21313 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
21314 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }
21316 /* ToC but for potentially MVE predicated instructions. */
21317 #define mToC(mnem, top, nops, ops, te) \
21318 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
21321 /* NCE but for MVE predicated instructions. */
21322 #define MNCE(mnem, op, nops, ops, enc) \
21323 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)
21325 /* NCEF but for MVE predicated instructions. */
21326 #define MNCEF(mnem, op, nops, ops, enc) \
21327 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
21330 static const struct asm_opcode insns
[] =
21332 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
21333 #define THUMB_VARIANT & arm_ext_v4t
21334 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21335 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21336 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21337 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21338 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
21339 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
21340 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
21341 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
21342 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21343 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21344 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21345 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21346 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21347 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
21348 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21349 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
21351 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
21352 for setting PSR flag bits. They are obsolete in V6 and do not
21353 have Thumb equivalents. */
21354 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21355 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21356 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
21357 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
21358 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
21359 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
21360 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21361 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21362 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
21364 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
21365 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
21366 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
21367 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
21369 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
21370 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
21371 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
21373 OP_ADDRGLDR
),ldst
, t_ldst
),
21374 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
21376 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21377 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21378 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21379 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21380 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21381 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21383 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
21384 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
21387 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
21388 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
21389 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
21390 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
21392 /* Thumb-compatibility pseudo ops. */
21393 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21394 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21395 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21396 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21397 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21398 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21399 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21400 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
21401 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
21402 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
21403 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
21404 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
21406 /* These may simplify to neg. */
21407 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
21408 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
21410 #undef THUMB_VARIANT
21411 #define THUMB_VARIANT & arm_ext_os
21413 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
21414 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
21416 #undef THUMB_VARIANT
21417 #define THUMB_VARIANT & arm_ext_v6
21419 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
21421 /* V1 instructions with no Thumb analogue prior to V6T2. */
21422 #undef THUMB_VARIANT
21423 #define THUMB_VARIANT & arm_ext_v6t2
21425 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21426 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
21427 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
21429 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21430 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21431 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
21432 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
21434 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21435 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21437 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21438 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
21440 /* V1 instructions with no Thumb analogue at all. */
21441 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
21442 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
21444 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
21445 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
21446 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
21447 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
21448 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
21449 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
21450 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
21451 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
21454 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
21455 #undef THUMB_VARIANT
21456 #define THUMB_VARIANT & arm_ext_v4t
21458 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
21459 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
21461 #undef THUMB_VARIANT
21462 #define THUMB_VARIANT & arm_ext_v6t2
21464 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21465 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
21467 /* Generic coprocessor instructions. */
21468 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21469 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21470 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21471 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21472 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21473 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21474 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21477 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
21479 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
21480 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
21483 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
21484 #undef THUMB_VARIANT
21485 #define THUMB_VARIANT & arm_ext_msr
21487 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
21488 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
21491 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
21492 #undef THUMB_VARIANT
21493 #define THUMB_VARIANT & arm_ext_v6t2
21495 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21496 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21497 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21498 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21499 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21500 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21501 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
21502 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
21505 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
21506 #undef THUMB_VARIANT
21507 #define THUMB_VARIANT & arm_ext_v4t
21509 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21510 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21511 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21512 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21513 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21514 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
21517 #define ARM_VARIANT & arm_ext_v4t_5
21519 /* ARM Architecture 4T. */
21520 /* Note: bx (and blx) are required on V5, even if the processor does
21521 not support Thumb. */
21522 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
21525 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
21526 #undef THUMB_VARIANT
21527 #define THUMB_VARIANT & arm_ext_v5t
21529 /* Note: blx has 2 variants; the .value coded here is for
21530 BLX(2). Only this variant has conditional execution. */
21531 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
21532 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
21534 #undef THUMB_VARIANT
21535 #define THUMB_VARIANT & arm_ext_v6t2
21537 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
21538 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21539 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21540 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21541 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
21542 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
21543 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21544 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
21547 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
21548 #undef THUMB_VARIANT
21549 #define THUMB_VARIANT & arm_ext_v5exp
21551 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21552 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21553 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21554 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21556 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21557 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
21559 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21560 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21561 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21562 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
21564 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21565 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21566 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21567 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21569 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21570 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21572 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21573 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21574 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21575 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
21578 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
21579 #undef THUMB_VARIANT
21580 #define THUMB_VARIANT & arm_ext_v6t2
21582 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
21583 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
21585 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
21586 ADDRGLDRS
), ldrd
, t_ldstd
),
21588 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21589 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21592 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
21594 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
21597 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
21598 #undef THUMB_VARIANT
21599 #define THUMB_VARIANT & arm_ext_v6
21601 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21602 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
21603 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21604 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21605 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
21606 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21607 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21608 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21609 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21610 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
21612 #undef THUMB_VARIANT
21613 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21615 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
21616 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21618 #undef THUMB_VARIANT
21619 #define THUMB_VARIANT & arm_ext_v6t2
21621 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21622 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
21624 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
21625 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
21627 /* ARM V6 not included in V7M. */
21628 #undef THUMB_VARIANT
21629 #define THUMB_VARIANT & arm_ext_v6_notm
21630 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21631 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21632 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
21633 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
21634 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21635 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
21636 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
21637 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
21638 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
21639 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21640 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21641 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
21642 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21643 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
21644 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
21645 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
21646 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21647 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
21648 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
21650 /* ARM V6 not included in V7M (eg. integer SIMD). */
21651 #undef THUMB_VARIANT
21652 #define THUMB_VARIANT & arm_ext_v6_dsp
21653 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
21654 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
21655 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21656 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21657 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21658 /* Old name for QASX. */
21659 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21660 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21661 /* Old name for QSAX. */
21662 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21663 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21664 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21665 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21666 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21667 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21668 /* Old name for SASX. */
21669 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21670 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21671 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21672 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21673 /* Old name for SHASX. */
21674 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21675 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21676 /* Old name for SHSAX. */
21677 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21678 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21679 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21680 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21681 /* Old name for SSAX. */
21682 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21683 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21684 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21685 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21686 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21687 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21688 /* Old name for UASX. */
21689 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21690 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21691 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21692 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21693 /* Old name for UHASX. */
21694 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21695 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21696 /* Old name for UHSAX. */
21697 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21698 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21699 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21700 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21701 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21702 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21703 /* Old name for UQASX. */
21704 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21705 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21706 /* Old name for UQSAX. */
21707 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21708 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21709 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21710 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21711 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21712 /* Old name for USAX. */
21713 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21714 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21715 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21716 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21717 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21718 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21719 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21720 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21721 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
21722 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
21723 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
21724 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21725 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21726 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21727 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21728 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21729 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21730 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21731 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
21732 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21733 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21734 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21735 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21736 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21737 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21738 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21739 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21740 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21741 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21742 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
21743 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
21744 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
21745 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
21746 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
21749 #define ARM_VARIANT & arm_ext_v6k_v6t2
21750 #undef THUMB_VARIANT
21751 #define THUMB_VARIANT & arm_ext_v6k_v6t2
21753 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
21754 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
21755 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
21756 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
21758 #undef THUMB_VARIANT
21759 #define THUMB_VARIANT & arm_ext_v6_notm
21760 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
21762 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
21763 RRnpcb
), strexd
, t_strexd
),
21765 #undef THUMB_VARIANT
21766 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21767 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
21769 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
21771 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21773 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
21775 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
21778 #define ARM_VARIANT & arm_ext_sec
21779 #undef THUMB_VARIANT
21780 #define THUMB_VARIANT & arm_ext_sec
21782 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
21785 #define ARM_VARIANT & arm_ext_virt
21786 #undef THUMB_VARIANT
21787 #define THUMB_VARIANT & arm_ext_virt
21789 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
21790 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
21793 #define ARM_VARIANT & arm_ext_pan
21794 #undef THUMB_VARIANT
21795 #define THUMB_VARIANT & arm_ext_pan
21797 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
21800 #define ARM_VARIANT & arm_ext_v6t2
21801 #undef THUMB_VARIANT
21802 #define THUMB_VARIANT & arm_ext_v6t2
21804 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
21805 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
21806 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21807 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
21809 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
21810 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
21812 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21813 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21814 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21815 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
21818 #define ARM_VARIANT & arm_ext_v3
21819 #undef THUMB_VARIANT
21820 #define THUMB_VARIANT & arm_ext_v6t2
21822 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
21823 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
21824 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
21827 #define ARM_VARIANT & arm_ext_v6t2
21828 #undef THUMB_VARIANT
21829 #define THUMB_VARIANT & arm_ext_v6t2_v8m
21830 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21831 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
21833 /* Thumb-only instructions. */
21835 #define ARM_VARIANT NULL
21836 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
21837 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
21839 /* ARM does not really have an IT instruction, so always allow it.
21840 The opcode is copied from Thumb in order to allow warnings in
21841 -mimplicit-it=[never | arm] modes. */
21843 #define ARM_VARIANT & arm_ext_v1
21844 #undef THUMB_VARIANT
21845 #define THUMB_VARIANT & arm_ext_v6t2
21847 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
21848 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
21849 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
21850 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
21851 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
21852 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
21853 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
21854 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
21855 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
21856 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
21857 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
21858 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
21859 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
21860 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
21861 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
21862 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
21863 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21864 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
21866 /* Thumb2 only instructions. */
21868 #define ARM_VARIANT NULL
21870 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21871 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
21872 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21873 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
21874 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
21875 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
21877 /* Hardware division instructions. */
21879 #define ARM_VARIANT & arm_ext_adiv
21880 #undef THUMB_VARIANT
21881 #define THUMB_VARIANT & arm_ext_div
21883 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21884 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
21886 /* ARM V6M/V7 instructions. */
21888 #define ARM_VARIANT & arm_ext_barrier
21889 #undef THUMB_VARIANT
21890 #define THUMB_VARIANT & arm_ext_barrier
21892 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
21893 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
21894 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
21896 /* ARM V7 instructions. */
21898 #define ARM_VARIANT & arm_ext_v7
21899 #undef THUMB_VARIANT
21900 #define THUMB_VARIANT & arm_ext_v7
21902 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
21903 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
21906 #define ARM_VARIANT & arm_ext_mp
21907 #undef THUMB_VARIANT
21908 #define THUMB_VARIANT & arm_ext_mp
21910 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
21912 /* AArchv8 instructions. */
21914 #define ARM_VARIANT & arm_ext_v8
21916 /* Instructions shared between armv8-a and armv8-m. */
21917 #undef THUMB_VARIANT
21918 #define THUMB_VARIANT & arm_ext_atomics
21920 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21921 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21922 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21923 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21924 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21925 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
21926 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21927 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
21928 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
21929 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21931 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21933 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
21935 #undef THUMB_VARIANT
21936 #define THUMB_VARIANT & arm_ext_v8
21938 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
21939 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
21941 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
21944 /* Defined in V8 but is in undefined encoding space for earlier
21945 architectures. However earlier architectures are required to treat
21946 this instuction as a semihosting trap as well. Hence while not explicitly
21947 defined as such, it is in fact correct to define the instruction for all
21949 #undef THUMB_VARIANT
21950 #define THUMB_VARIANT & arm_ext_v1
21952 #define ARM_VARIANT & arm_ext_v1
21953 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
21955 /* ARMv8 T32 only. */
21957 #define ARM_VARIANT NULL
21958 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
21959 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
21960 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
21962 /* FP for ARMv8. */
21964 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
21965 #undef THUMB_VARIANT
21966 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
21968 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21969 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21970 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21971 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
21972 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21973 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
21974 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
21975 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
21976 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
21977 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
21978 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
21979 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
21980 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
21982 /* Crypto v1 extensions. */
21984 #define ARM_VARIANT & fpu_crypto_ext_armv8
21985 #undef THUMB_VARIANT
21986 #define THUMB_VARIANT & fpu_crypto_ext_armv8
21988 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
21989 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
21990 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
21991 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
21992 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
21993 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
21994 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
21995 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
21996 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
21997 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
21998 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
21999 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
22000 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
22001 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
22004 #define ARM_VARIANT & crc_ext_armv8
22005 #undef THUMB_VARIANT
22006 #define THUMB_VARIANT & crc_ext_armv8
22007 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
22008 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
22009 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
22010 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
22011 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
22012 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
22014 /* ARMv8.2 RAS extension. */
22016 #define ARM_VARIANT & arm_ext_ras
22017 #undef THUMB_VARIANT
22018 #define THUMB_VARIANT & arm_ext_ras
22019 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
22022 #define ARM_VARIANT & arm_ext_v8_3
22023 #undef THUMB_VARIANT
22024 #define THUMB_VARIANT & arm_ext_v8_3
22025 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
22026 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
22027 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
22030 #define ARM_VARIANT & fpu_neon_ext_dotprod
22031 #undef THUMB_VARIANT
22032 #define THUMB_VARIANT & fpu_neon_ext_dotprod
22033 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
22034 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
22037 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
22038 #undef THUMB_VARIANT
22039 #define THUMB_VARIANT NULL
22041 cCE("wfs", e200110
, 1, (RR
), rd
),
22042 cCE("rfs", e300110
, 1, (RR
), rd
),
22043 cCE("wfc", e400110
, 1, (RR
), rd
),
22044 cCE("rfc", e500110
, 1, (RR
), rd
),
22046 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22047 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22048 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22049 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22051 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22052 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22053 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22054 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
22056 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
22057 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
22058 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
22059 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
22060 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
22061 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
22062 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
22063 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
22064 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
22065 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
22066 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
22067 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
22069 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
22070 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
22071 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
22072 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
22073 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
22074 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
22075 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
22076 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
22077 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
22078 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
22079 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
22080 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
22082 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
22083 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
22084 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
22085 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
22086 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
22087 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
22088 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
22089 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
22090 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
22091 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
22092 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
22093 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
22095 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
22096 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
22097 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
22098 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
22099 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
22100 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
22101 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
22102 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
22103 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
22104 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
22105 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
22106 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
22108 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
22109 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
22110 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
22111 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
22112 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
22113 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
22114 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
22115 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
22116 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
22117 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
22118 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
22119 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
22121 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
22122 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
22123 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
22124 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
22125 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
22126 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
22127 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
22128 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
22129 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
22130 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
22131 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
22132 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
22134 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
22135 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
22136 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
22137 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
22138 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
22139 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
22140 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
22141 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
22142 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
22143 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
22144 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
22145 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
22147 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
22148 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
22149 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
22150 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
22151 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
22152 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
22153 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
22154 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
22155 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
22156 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
22157 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
22158 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
22160 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
22161 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
22162 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
22163 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
22164 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
22165 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
22166 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
22167 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
22168 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
22169 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
22170 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
22171 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
22173 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
22174 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
22175 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
22176 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
22177 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
22178 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
22179 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
22180 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
22181 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
22182 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
22183 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
22184 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
22186 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
22187 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
22188 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
22189 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
22190 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
22191 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
22192 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
22193 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
22194 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
22195 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
22196 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
22197 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
22199 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
22200 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
22201 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
22202 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
22203 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
22204 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
22205 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
22206 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
22207 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
22208 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
22209 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
22210 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
22212 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
22213 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
22214 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
22215 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
22216 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
22217 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
22218 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
22219 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
22220 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
22221 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
22222 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
22223 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
22225 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
22226 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
22227 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
22228 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
22229 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
22230 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
22231 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
22232 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
22233 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
22234 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
22235 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
22236 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
22238 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
22239 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
22240 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
22241 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
22242 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
22243 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
22244 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
22245 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
22246 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
22247 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
22248 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
22249 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
22251 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
22252 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
22253 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
22254 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
22255 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
22256 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
22257 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
22258 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
22259 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
22260 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
22261 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
22262 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
22264 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22265 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22266 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22267 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22268 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22269 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22270 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22271 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22272 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22273 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22274 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22275 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22277 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22278 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22279 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22280 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22281 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22282 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22283 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22284 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22285 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22286 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22287 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22288 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22290 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22291 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22292 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22293 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22294 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22295 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22296 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22297 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22298 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22299 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22300 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22301 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22303 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22304 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22305 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22306 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22307 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22308 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22309 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22310 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22311 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22312 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22313 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22314 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22316 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22317 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22318 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22319 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22320 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22321 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22322 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22323 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22324 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22325 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22326 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22327 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22329 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22330 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22331 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22332 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22333 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22334 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22335 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22336 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22337 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22338 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22339 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22340 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22342 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22343 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22344 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22345 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22346 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22347 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22348 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22349 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22350 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22351 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22352 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22353 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22355 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22356 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22357 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22358 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22359 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22360 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22361 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22362 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22363 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22364 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22365 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22366 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22368 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22369 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22370 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22371 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22372 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22373 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22374 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22375 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22376 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22377 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22378 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22379 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22381 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22382 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22383 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22384 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22385 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22386 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22387 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22388 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22389 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22390 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22391 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22392 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22394 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22395 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22396 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22397 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22398 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22399 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22400 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22401 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22402 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22403 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22404 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22405 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22407 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22408 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22409 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22410 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22411 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22412 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22413 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22414 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22415 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22416 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22417 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22418 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22420 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22421 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22422 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22423 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22424 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22425 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22426 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22427 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22428 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22429 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22430 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22431 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
22433 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22434 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22435 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22436 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
22438 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
22439 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
22440 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
22441 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
22442 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
22443 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
22444 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
22445 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
22446 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
22447 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
22448 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
22449 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
22451 /* The implementation of the FIX instruction is broken on some
22452 assemblers, in that it accepts a precision specifier as well as a
22453 rounding specifier, despite the fact that this is meaningless.
22454 To be more compatible, we accept it as well, though of course it
22455 does not set any bits. */
22456 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
22457 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
22458 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
22459 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
22460 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
22461 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
22462 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
22463 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
22464 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
22465 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
22466 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
22467 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
22468 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
22470 /* Instructions that were new with the real FPA, call them V2. */
22472 #define ARM_VARIANT & fpu_fpa_ext_v2
22474 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22475 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22476 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22477 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22478 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22479 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
22482 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
22484 /* Moves and type conversions. */
22485 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22486 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
22487 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
22488 cCE("fmstat", ef1fa10
, 0, (), noargs
),
22489 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
22490 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
22491 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22492 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22493 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22494 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22495 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22496 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22497 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
22498 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
22500 /* Memory operations. */
22501 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
22502 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
22503 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22504 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22505 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22506 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22507 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22508 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22509 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22510 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22511 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22512 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
22513 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22514 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
22515 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22516 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
22517 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22518 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
22520 /* Monadic operations. */
22521 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22522 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22523 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22525 /* Dyadic operations. */
22526 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22527 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22528 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22529 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22530 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22531 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22532 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22533 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22534 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22537 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22538 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
22539 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
22540 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
22542 /* Double precision load/store are still present on single precision
22543 implementations. */
22544 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22545 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
22546 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22547 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22548 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22549 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22550 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22551 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
22552 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22553 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
22556 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
22558 /* Moves and type conversions. */
22559 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22560 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22561 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22562 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22563 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
22564 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22565 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
22566 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22567 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
22568 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22569 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22570 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22571 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
22573 /* Monadic operations. */
22574 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22575 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22576 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22578 /* Dyadic operations. */
22579 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22580 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22581 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22582 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22583 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22584 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22585 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22586 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22587 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22590 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22591 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
22592 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
22593 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
22596 #define ARM_VARIANT & fpu_vfp_ext_v2
22598 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
22599 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
22600 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
22601 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
22603 /* Instructions which may belong to either the Neon or VFP instruction sets.
22604 Individual encoder functions perform additional architecture checks. */
22606 #define ARM_VARIANT & fpu_vfp_ext_v1xd
22607 #undef THUMB_VARIANT
22608 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
22610 /* These mnemonics are unique to VFP. */
22611 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
22612 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
22613 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22614 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22615 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22616 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22617 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
22618 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
22619 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
22620 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
22622 /* Mnemonics shared by Neon and VFP. */
22623 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
22624 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22625 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
22627 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22628 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22629 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22630 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22631 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22632 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
22634 mnCEF(vcvt
, _vcvt
, 3, (RNSDQMQ
, RNSDQMQ
, oI32z
), neon_cvt
),
22635 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
22636 MNCEF(vcvtb
, eb20a40
, 3, (RVSDMQ
, RVSDMQ
, oI32b
), neon_cvtb
),
22637 MNCEF(vcvtt
, eb20a40
, 3, (RVSDMQ
, RVSDMQ
, oI32b
), neon_cvtt
),
22640 /* NOTE: All VMOV encoding is special-cased! */
22641 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
22642 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
22644 #undef THUMB_VARIANT
22645 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
22646 by different feature bits. Since we are setting the Thumb guard, we can
22647 require Thumb-1 which makes it a nop guard and set the right feature bit in
22648 do_vldr_vstr (). */
22649 #define THUMB_VARIANT & arm_ext_v4t
22650 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22651 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
22654 #define ARM_VARIANT & arm_ext_fp16
22655 #undef THUMB_VARIANT
22656 #define THUMB_VARIANT & arm_ext_fp16
22657 /* New instructions added from v8.2, allowing the extraction and insertion of
22658 the upper 16 bits of a 32-bit vector register. */
22659 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
22660 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
22662 /* New backported fma/fms instructions optional in v8.2. */
22663 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
22664 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
22666 #undef THUMB_VARIANT
22667 #define THUMB_VARIANT & fpu_neon_ext_v1
22669 #define ARM_VARIANT & fpu_neon_ext_v1
22671 /* Data processing with three registers of the same length. */
22672 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
22673 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
22674 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
22675 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22676 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22677 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22678 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22679 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
22680 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
22681 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
22682 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22683 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22684 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
22685 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
22686 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22687 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22688 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
22689 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
22690 /* If not immediate, fall back to neon_dyadic_i64_su.
22691 shl_imm should accept I8 I16 I32 I64,
22692 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
22693 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
22694 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
22695 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
22696 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
22697 /* Logic ops, types optional & ignored. */
22698 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22699 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22700 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22701 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22702 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22703 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22704 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
22705 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
22706 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
22707 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
22708 /* Bitfield ops, untyped. */
22709 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22710 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22711 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22712 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22713 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
22714 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
22715 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
22716 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22717 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22718 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22719 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
22720 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
22721 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
22722 back to neon_dyadic_if_su. */
22723 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22724 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22725 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
22726 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
22727 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22728 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22729 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
22730 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
22731 /* Comparison. Type I8 I16 I32 F32. */
22732 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
22733 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
22734 /* As above, D registers only. */
22735 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22736 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
22737 /* Int and float variants, signedness unimportant. */
22738 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22739 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
22740 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
22741 /* Add/sub take types I8 I16 I32 I64 F32. */
22742 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22743 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
22744 /* vtst takes sizes 8, 16, 32. */
22745 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
22746 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
22747 /* VMUL takes I8 I16 I32 F32 P8. */
22748 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
22749 /* VQD{R}MULH takes S16 S32. */
22750 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22751 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22752 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
22753 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
22754 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22755 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22756 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
22757 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
22758 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22759 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22760 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
22761 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
22762 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22763 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22764 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
22765 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
22766 /* ARM v8.1 extension. */
22767 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22768 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22769 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
22770 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
22772 /* Two address, int/float. Types S8 S16 S32 F32. */
22773 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22774 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
22776 /* Data processing with two registers and a shift amount. */
22777 /* Right shifts, and variants with rounding.
22778 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
22779 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22780 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22781 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
22782 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
22783 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22784 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22785 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
22786 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
22787 /* Shift and insert. Sizes accepted 8 16 32 64. */
22788 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
22789 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
22790 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
22791 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
22792 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
22793 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
22794 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
22795 /* Right shift immediate, saturating & narrowing, with rounding variants.
22796 Types accepted S16 S32 S64 U16 U32 U64. */
22797 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22798 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
22799 /* As above, unsigned. Types accepted S16 S32 S64. */
22800 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22801 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
22802 /* Right shift narrowing. Types accepted I16 I32 I64. */
22803 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22804 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
22805 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
22806 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
22807 /* CVT with optional immediate for fixed-point variant. */
22808 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
22810 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
22811 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
22813 /* Data processing, three registers of different lengths. */
22814 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
22815 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
22816 /* If not scalar, fall back to neon_dyadic_long.
22817 Vector types as above, scalar types S16 S32 U16 U32. */
22818 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22819 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
22820 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
22821 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22822 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
22823 /* Dyadic, narrowing insns. Types I16 I32 I64. */
22824 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22825 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22826 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22827 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
22828 /* Saturating doubling multiplies. Types S16 S32. */
22829 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22830 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22831 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
22832 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
22833 S16 S32 U16 U32. */
22834 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
22836 /* Extract. Size 8. */
22837 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
22838 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
22840 /* Two registers, miscellaneous. */
22841 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
22842 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
22843 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
22844 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
22845 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
22846 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
22847 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
22848 /* Vector replicate. Sizes 8 16 32. */
22849 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
22850 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
22851 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
22852 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
22853 /* VMOVN. Types I16 I32 I64. */
22854 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
22855 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
22856 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
22857 /* VQMOVUN. Types S16 S32 S64. */
22858 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
22859 /* VZIP / VUZP. Sizes 8 16 32. */
22860 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22861 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22862 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
22863 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
22864 /* VQABS / VQNEG. Types S8 S16 S32. */
22865 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22866 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22867 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
22868 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
22869 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
22870 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22871 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
22872 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
22873 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
22874 /* Reciprocal estimates. Types U32 F16 F32. */
22875 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22876 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
22877 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
22878 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
22879 /* VCLS. Types S8 S16 S32. */
22880 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
22881 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
22882 /* VCLZ. Types I8 I16 I32. */
22883 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
22884 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
22885 /* VCNT. Size 8. */
22886 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
22887 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
22888 /* Two address, untyped. */
22889 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
22890 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
22891 /* VTRN. Sizes 8 16 32. */
22892 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
22893 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
22895 /* Table lookup. Size 8. */
22896 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22897 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
22899 #undef THUMB_VARIANT
22900 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
22902 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
22904 /* Neon element/structure load/store. */
22905 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22906 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22907 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22908 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22909 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22910 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22911 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22912 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
22914 #undef THUMB_VARIANT
22915 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
22917 #define ARM_VARIANT & fpu_vfp_ext_v3xd
22918 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
22919 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22920 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22921 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22922 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22923 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22924 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22925 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
22926 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
22928 #undef THUMB_VARIANT
22929 #define THUMB_VARIANT & fpu_vfp_ext_v3
22931 #define ARM_VARIANT & fpu_vfp_ext_v3
22933 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
22934 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22935 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22936 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22937 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22938 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22939 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22940 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
22941 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
22944 #define ARM_VARIANT & fpu_vfp_ext_fma
22945 #undef THUMB_VARIANT
22946 #define THUMB_VARIANT & fpu_vfp_ext_fma
22947 /* Mnemonics shared by Neon and VFP. These are included in the
22948 VFP FMA variant; NEON and VFP FMA always includes the NEON
22949 FMA instructions. */
22950 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22951 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
22952 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
22953 the v form should always be used. */
22954 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22955 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
22956 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22957 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
22958 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22959 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
22961 #undef THUMB_VARIANT
22963 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
22965 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22966 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22967 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22968 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22969 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22970 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
22971 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
22972 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
22975 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
22977 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
22978 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
22979 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
22980 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
22981 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
22982 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
22983 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
22984 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
22985 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
22986 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22987 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22988 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22989 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22990 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22991 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
22992 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22993 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22994 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
22995 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
22996 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
22997 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22998 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
22999 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
23000 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
23001 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
23002 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
23003 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
23004 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
23005 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
23006 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
23007 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
23008 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
23009 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
23010 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
23011 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23012 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23013 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23014 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23015 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23016 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23017 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23018 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23019 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23020 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23021 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23022 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23023 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
23024 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23025 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23026 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23027 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23028 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23029 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23030 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23031 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23032 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23033 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23034 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23035 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23036 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23037 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23038 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23039 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23040 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23041 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23042 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23043 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
23044 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
23045 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
23046 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
23047 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23048 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23049 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23050 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23051 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23052 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23053 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23054 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23055 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23056 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23057 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23058 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23059 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23060 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23061 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23062 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23063 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23064 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23065 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
23066 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23067 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23068 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23069 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23070 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23071 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23072 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23073 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23074 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23075 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23076 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23077 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23078 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23079 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23080 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23081 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23082 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23083 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23084 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23085 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23086 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23087 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
23088 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23089 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23090 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23091 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23092 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23093 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23094 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23095 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23096 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23097 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23098 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23099 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23100 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23101 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23102 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23103 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23104 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
23105 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
23106 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
23107 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
23108 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
23109 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
23110 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23111 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23112 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23113 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23114 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23115 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23116 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23117 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23118 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23119 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23120 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23121 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23122 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23123 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23124 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
23125 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23126 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23127 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23128 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23129 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23130 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23131 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23132 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23133 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
23134 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23135 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23136 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23137 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23138 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
23141 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
23143 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
23144 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
23145 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
23146 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23147 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23148 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
23149 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23150 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23151 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23152 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23153 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23154 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23155 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23156 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23157 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23158 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23159 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23160 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23161 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23162 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23163 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
23164 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23165 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23166 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23167 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23168 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23169 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23170 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23171 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23172 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23173 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23174 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23175 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23176 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23177 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23178 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23179 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23180 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23181 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23182 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23183 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23184 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23185 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23186 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23187 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23188 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23189 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23190 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23191 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23192 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23193 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23194 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23195 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23196 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23197 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23198 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23199 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
23202 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
23204 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
23205 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
23206 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
23207 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
23208 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
23209 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
23210 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
23211 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
23212 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
23213 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
23214 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
23215 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
23216 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
23217 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
23218 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
23219 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
23220 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
23221 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
23222 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
23223 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
23224 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
23225 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
23226 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
23227 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
23228 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
23229 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
23230 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
23231 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
23232 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
23233 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
23234 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
23235 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
23236 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
23237 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
23238 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
23239 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
23240 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
23241 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
23242 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
23243 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
23244 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
23245 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
23246 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
23247 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
23248 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
23249 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
23250 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
23251 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
23252 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
23253 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
23254 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
23255 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
23256 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
23257 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
23258 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
23259 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
23260 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
23261 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
23262 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
23263 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
23264 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
23265 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
23266 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
23267 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
23268 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23269 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
23270 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23271 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
23272 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23273 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
23274 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23275 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
23276 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
23277 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
23278 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
23279 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
23281 /* ARMv8.5-A instructions. */
23283 #define ARM_VARIANT & arm_ext_sb
23284 #undef THUMB_VARIANT
23285 #define THUMB_VARIANT & arm_ext_sb
23286 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
23289 #define ARM_VARIANT & arm_ext_predres
23290 #undef THUMB_VARIANT
23291 #define THUMB_VARIANT & arm_ext_predres
23292 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
23293 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
23294 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
23296 /* ARMv8-M instructions. */
23298 #define ARM_VARIANT NULL
23299 #undef THUMB_VARIANT
23300 #define THUMB_VARIANT & arm_ext_v8m
23301 ToU("sg", e97fe97f
, 0, (), noargs
),
23302 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
23303 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
23304 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
23305 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
23306 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
23307 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
23309 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
23310 instructions behave as nop if no VFP is present. */
23311 #undef THUMB_VARIANT
23312 #define THUMB_VARIANT & arm_ext_v8m_main
23313 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
23314 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
23316 /* Armv8.1-M Mainline instructions. */
23317 #undef THUMB_VARIANT
23318 #define THUMB_VARIANT & arm_ext_v8_1m_main
23319 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
23320 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
23321 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
23322 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
23323 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
23325 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
23326 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
23327 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
23329 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
23330 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
),
23332 #undef THUMB_VARIANT
23333 #define THUMB_VARIANT & mve_ext
23334 ToC("vpst", fe710f4d
, 0, (), mve_vpt
),
23335 ToC("vpstt", fe318f4d
, 0, (), mve_vpt
),
23336 ToC("vpste", fe718f4d
, 0, (), mve_vpt
),
23337 ToC("vpsttt", fe314f4d
, 0, (), mve_vpt
),
23338 ToC("vpstte", fe31cf4d
, 0, (), mve_vpt
),
23339 ToC("vpstet", fe71cf4d
, 0, (), mve_vpt
),
23340 ToC("vpstee", fe714f4d
, 0, (), mve_vpt
),
23341 ToC("vpstttt", fe312f4d
, 0, (), mve_vpt
),
23342 ToC("vpsttte", fe316f4d
, 0, (), mve_vpt
),
23343 ToC("vpsttet", fe31ef4d
, 0, (), mve_vpt
),
23344 ToC("vpsttee", fe31af4d
, 0, (), mve_vpt
),
23345 ToC("vpstett", fe71af4d
, 0, (), mve_vpt
),
23346 ToC("vpstete", fe71ef4d
, 0, (), mve_vpt
),
23347 ToC("vpsteet", fe716f4d
, 0, (), mve_vpt
),
23348 ToC("vpsteee", fe712f4d
, 0, (), mve_vpt
),
23350 /* MVE and MVE FP only. */
23351 mCEF(vmullb
, _vmullb
, 3, (RMQ
, RMQ
, RMQ
), mve_vmull
),
23352 mCEF(vabav
, _vabav
, 3, (RRnpcsp
, RMQ
, RMQ
), mve_vabav
),
23353 mCEF(vmladav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23354 mCEF(vmladava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23355 mCEF(vmladavx
, _vmladavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23356 mCEF(vmladavax
, _vmladavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23357 mCEF(vmlav
, _vmladav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23358 mCEF(vmlava
, _vmladava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23359 mCEF(vmlsdav
, _vmlsdav
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23360 mCEF(vmlsdava
, _vmlsdava
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23361 mCEF(vmlsdavx
, _vmlsdavx
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23362 mCEF(vmlsdavax
, _vmlsdavax
, 3, (RRe
, RMQ
, RMQ
), mve_vmladav
),
23364 mCEF(vst20
, _vst20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23365 mCEF(vst21
, _vst21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23366 mCEF(vst40
, _vst40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23367 mCEF(vst41
, _vst41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23368 mCEF(vst42
, _vst42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23369 mCEF(vst43
, _vst43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23370 mCEF(vld20
, _vld20
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23371 mCEF(vld21
, _vld21
, 2, (MSTRLST2
, ADDRMVE
), mve_vst_vld
),
23372 mCEF(vld40
, _vld40
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23373 mCEF(vld41
, _vld41
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23374 mCEF(vld42
, _vld42
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23375 mCEF(vld43
, _vld43
, 2, (MSTRLST4
, ADDRMVE
), mve_vst_vld
),
23376 mCEF(vstrb
, _vstrb
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23377 mCEF(vstrh
, _vstrh
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23378 mCEF(vstrw
, _vstrw
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23379 mCEF(vstrd
, _vstrd
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23380 mCEF(vldrb
, _vldrb
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23381 mCEF(vldrh
, _vldrh
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23382 mCEF(vldrw
, _vldrw
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23383 mCEF(vldrd
, _vldrd
, 2, (RMQ
, ADDRMVE
), mve_vstr_vldr
),
23386 #define ARM_VARIANT & fpu_vfp_ext_v1xd
23387 #undef THUMB_VARIANT
23388 #define THUMB_VARIANT & arm_ext_v6t2
23390 mCEF(vmullt
, _vmullt
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQ_RNSC_MQ
), mve_vmull
),
23391 mnCEF(vadd
, _vadd
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
23392 mnCEF(vsub
, _vsub
, 3, (RNSDQMQ
, oRNSDQMQ
, RNSDQMQR
), neon_addsub_if_i
),
23394 MNCEF(vabs
, 1b10300
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
23395 MNCEF(vneg
, 1b10380
, 2, (RNSDQMQ
, RNSDQMQ
), neon_abs_neg
),
23398 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
23399 mnUF(vcvta
, _vcvta
, 2, (RNSDQMQ
, oRNSDQMQ
), neon_cvta
),
23400 mnUF(vcvtp
, _vcvta
, 2, (RNSDQMQ
, oRNSDQMQ
), neon_cvtp
),
23401 mnUF(vcvtn
, _vcvta
, 3, (RNSDQMQ
, oRNSDQMQ
, oI32z
), neon_cvtn
),
23402 mnUF(vcvtm
, _vcvta
, 2, (RNSDQMQ
, oRNSDQMQ
), neon_cvtm
),
23405 #define ARM_VARIANT & fpu_neon_ext_v1
23406 mnUF(vabd
, _vabd
, 3, (RNDQMQ
, oRNDQMQ
, RNDQMQ
), neon_dyadic_if_su
),
23407 mnUF(vabdl
, _vabdl
, 3, (RNQMQ
, RNDMQ
, RNDMQ
), neon_dyadic_long
),
23408 mnUF(vaddl
, _vaddl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
23409 mnUF(vsubl
, _vsubl
, 3, (RNQMQ
, RNDMQ
, RNDMQR
), neon_dyadic_long
),
23412 #undef THUMB_VARIANT
23444 /* MD interface: bits in the object file. */
23446 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
23447 for use in the a.out file, and stores them in the array pointed to by buf.
23448 This knows about the endian-ness of the target machine and does
23449 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
23450 2 (short) and 4 (long) Floating numbers are put out as a series of
23451 LITTLENUMS (shorts, here at least). */
23454 md_number_to_chars (char * buf
, valueT val
, int n
)
23456 if (target_big_endian
)
23457 number_to_chars_bigendian (buf
, val
, n
);
23459 number_to_chars_littleendian (buf
, val
, n
);
23463 md_chars_to_number (char * buf
, int n
)
23466 unsigned char * where
= (unsigned char *) buf
;
23468 if (target_big_endian
)
23473 result
|= (*where
++ & 255);
23481 result
|= (where
[n
] & 255);
23488 /* MD interface: Sections. */
23490 /* Calculate the maximum variable size (i.e., excluding fr_fix)
23491 that an rs_machine_dependent frag may reach. */
23494 arm_frag_max_var (fragS
*fragp
)
23496 /* We only use rs_machine_dependent for variable-size Thumb instructions,
23497 which are either THUMB_SIZE (2) or INSN_SIZE (4).
23499 Note that we generate relaxable instructions even for cases that don't
23500 really need it, like an immediate that's a trivial constant. So we're
23501 overestimating the instruction size for some of those cases. Rather
23502 than putting more intelligence here, it would probably be better to
23503 avoid generating a relaxation frag in the first place when it can be
23504 determined up front that a short instruction will suffice. */
23506 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
23510 /* Estimate the size of a frag before relaxing. Assume everything fits in
23514 md_estimate_size_before_relax (fragS
* fragp
,
23515 segT segtype ATTRIBUTE_UNUSED
)
23521 /* Convert a machine dependent frag. */
23524 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
23526 unsigned long insn
;
23527 unsigned long old_op
;
23535 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23537 old_op
= bfd_get_16(abfd
, buf
);
23538 if (fragp
->fr_symbol
)
23540 exp
.X_op
= O_symbol
;
23541 exp
.X_add_symbol
= fragp
->fr_symbol
;
23545 exp
.X_op
= O_constant
;
23547 exp
.X_add_number
= fragp
->fr_offset
;
23548 opcode
= fragp
->fr_subtype
;
23551 case T_MNEM_ldr_pc
:
23552 case T_MNEM_ldr_pc2
:
23553 case T_MNEM_ldr_sp
:
23554 case T_MNEM_str_sp
:
23561 if (fragp
->fr_var
== 4)
23563 insn
= THUMB_OP32 (opcode
);
23564 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
23566 insn
|= (old_op
& 0x700) << 4;
23570 insn
|= (old_op
& 7) << 12;
23571 insn
|= (old_op
& 0x38) << 13;
23573 insn
|= 0x00000c00;
23574 put_thumb32_insn (buf
, insn
);
23575 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
23579 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
23581 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
23584 if (fragp
->fr_var
== 4)
23586 insn
= THUMB_OP32 (opcode
);
23587 insn
|= (old_op
& 0xf0) << 4;
23588 put_thumb32_insn (buf
, insn
);
23589 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
23593 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23594 exp
.X_add_number
-= 4;
23602 if (fragp
->fr_var
== 4)
23604 int r0off
= (opcode
== T_MNEM_mov
23605 || opcode
== T_MNEM_movs
) ? 0 : 8;
23606 insn
= THUMB_OP32 (opcode
);
23607 insn
= (insn
& 0xe1ffffff) | 0x10000000;
23608 insn
|= (old_op
& 0x700) << r0off
;
23609 put_thumb32_insn (buf
, insn
);
23610 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23614 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
23619 if (fragp
->fr_var
== 4)
23621 insn
= THUMB_OP32(opcode
);
23622 put_thumb32_insn (buf
, insn
);
23623 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
23626 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
23630 if (fragp
->fr_var
== 4)
23632 insn
= THUMB_OP32(opcode
);
23633 insn
|= (old_op
& 0xf00) << 14;
23634 put_thumb32_insn (buf
, insn
);
23635 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
23638 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
23641 case T_MNEM_add_sp
:
23642 case T_MNEM_add_pc
:
23643 case T_MNEM_inc_sp
:
23644 case T_MNEM_dec_sp
:
23645 if (fragp
->fr_var
== 4)
23647 /* ??? Choose between add and addw. */
23648 insn
= THUMB_OP32 (opcode
);
23649 insn
|= (old_op
& 0xf0) << 4;
23650 put_thumb32_insn (buf
, insn
);
23651 if (opcode
== T_MNEM_add_pc
)
23652 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
23654 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23657 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23665 if (fragp
->fr_var
== 4)
23667 insn
= THUMB_OP32 (opcode
);
23668 insn
|= (old_op
& 0xf0) << 4;
23669 insn
|= (old_op
& 0xf) << 16;
23670 put_thumb32_insn (buf
, insn
);
23671 if (insn
& (1 << 20))
23672 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
23674 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
23677 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
23683 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
23684 (enum bfd_reloc_code_real
) reloc_type
);
23685 fixp
->fx_file
= fragp
->fr_file
;
23686 fixp
->fx_line
= fragp
->fr_line
;
23687 fragp
->fr_fix
+= fragp
->fr_var
;
23689 /* Set whether we use thumb-2 ISA based on final relaxation results. */
23690 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
23691 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
23692 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
23695 /* Return the size of a relaxable immediate operand instruction.
23696 SHIFT and SIZE specify the form of the allowable immediate. */
23698 relax_immediate (fragS
*fragp
, int size
, int shift
)
23704 /* ??? Should be able to do better than this. */
23705 if (fragp
->fr_symbol
)
23708 low
= (1 << shift
) - 1;
23709 mask
= (1 << (shift
+ size
)) - (1 << shift
);
23710 offset
= fragp
->fr_offset
;
23711 /* Force misaligned offsets to 32-bit variant. */
23714 if (offset
& ~mask
)
23719 /* Get the address of a symbol during relaxation. */
23721 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
23727 sym
= fragp
->fr_symbol
;
23728 sym_frag
= symbol_get_frag (sym
);
23729 know (S_GET_SEGMENT (sym
) != absolute_section
23730 || sym_frag
== &zero_address_frag
);
23731 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
23733 /* If frag has yet to be reached on this pass, assume it will
23734 move by STRETCH just as we did. If this is not so, it will
23735 be because some frag between grows, and that will force
23739 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
23743 /* Adjust stretch for any alignment frag. Note that if have
23744 been expanding the earlier code, the symbol may be
23745 defined in what appears to be an earlier frag. FIXME:
23746 This doesn't handle the fr_subtype field, which specifies
23747 a maximum number of bytes to skip when doing an
23749 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
23751 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
23754 stretch
= - ((- stretch
)
23755 & ~ ((1 << (int) f
->fr_offset
) - 1));
23757 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
23769 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
23772 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
23777 /* Assume worst case for symbols not known to be in the same section. */
23778 if (fragp
->fr_symbol
== NULL
23779 || !S_IS_DEFINED (fragp
->fr_symbol
)
23780 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23781 || S_IS_WEAK (fragp
->fr_symbol
))
23784 val
= relaxed_symbol_addr (fragp
, stretch
);
23785 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
23786 addr
= (addr
+ 4) & ~3;
23787 /* Force misaligned targets to 32-bit variant. */
23791 if (val
< 0 || val
> 1020)
23796 /* Return the size of a relaxable add/sub immediate instruction. */
23798 relax_addsub (fragS
*fragp
, asection
*sec
)
23803 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
23804 op
= bfd_get_16(sec
->owner
, buf
);
23805 if ((op
& 0xf) == ((op
>> 4) & 0xf))
23806 return relax_immediate (fragp
, 8, 0);
23808 return relax_immediate (fragp
, 3, 0);
23811 /* Return TRUE iff the definition of symbol S could be pre-empted
23812 (overridden) at link or load time. */
23814 symbol_preemptible (symbolS
*s
)
23816 /* Weak symbols can always be pre-empted. */
23820 /* Non-global symbols cannot be pre-empted. */
23821 if (! S_IS_EXTERNAL (s
))
23825 /* In ELF, a global symbol can be marked protected, or private. In that
23826 case it can't be pre-empted (other definitions in the same link unit
23827 would violate the ODR). */
23828 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
23832 /* Other global symbols might be pre-empted. */
23836 /* Return the size of a relaxable branch instruction. BITS is the
23837 size of the offset field in the narrow instruction. */
23840 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
23846 /* Assume worst case for symbols not known to be in the same section. */
23847 if (!S_IS_DEFINED (fragp
->fr_symbol
)
23848 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
23849 || S_IS_WEAK (fragp
->fr_symbol
))
23853 /* A branch to a function in ARM state will require interworking. */
23854 if (S_IS_DEFINED (fragp
->fr_symbol
)
23855 && ARM_IS_FUNC (fragp
->fr_symbol
))
23859 if (symbol_preemptible (fragp
->fr_symbol
))
23862 val
= relaxed_symbol_addr (fragp
, stretch
);
23863 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
23866 /* Offset is a signed value *2 */
23868 if (val
>= limit
|| val
< -limit
)
23874 /* Relax a machine dependent frag. This returns the amount by which
23875 the current size of the frag should change. */
23878 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
23883 oldsize
= fragp
->fr_var
;
23884 switch (fragp
->fr_subtype
)
23886 case T_MNEM_ldr_pc2
:
23887 newsize
= relax_adr (fragp
, sec
, stretch
);
23889 case T_MNEM_ldr_pc
:
23890 case T_MNEM_ldr_sp
:
23891 case T_MNEM_str_sp
:
23892 newsize
= relax_immediate (fragp
, 8, 2);
23896 newsize
= relax_immediate (fragp
, 5, 2);
23900 newsize
= relax_immediate (fragp
, 5, 1);
23904 newsize
= relax_immediate (fragp
, 5, 0);
23907 newsize
= relax_adr (fragp
, sec
, stretch
);
23913 newsize
= relax_immediate (fragp
, 8, 0);
23916 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
23919 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
23921 case T_MNEM_add_sp
:
23922 case T_MNEM_add_pc
:
23923 newsize
= relax_immediate (fragp
, 8, 2);
23925 case T_MNEM_inc_sp
:
23926 case T_MNEM_dec_sp
:
23927 newsize
= relax_immediate (fragp
, 7, 2);
23933 newsize
= relax_addsub (fragp
, sec
);
23939 fragp
->fr_var
= newsize
;
23940 /* Freeze wide instructions that are at or before the same location as
23941 in the previous pass. This avoids infinite loops.
23942 Don't freeze them unconditionally because targets may be artificially
23943 misaligned by the expansion of preceding frags. */
23944 if (stretch
<= 0 && newsize
> 2)
23946 md_convert_frag (sec
->owner
, sec
, fragp
);
23950 return newsize
- oldsize
;
23953 /* Round up a section size to the appropriate boundary. */
23956 md_section_align (segT segment ATTRIBUTE_UNUSED
,
23962 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
23963 of an rs_align_code fragment. */
23966 arm_handle_align (fragS
* fragP
)
23968 static unsigned char const arm_noop
[2][2][4] =
23971 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
23972 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
23975 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
23976 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
23979 static unsigned char const thumb_noop
[2][2][2] =
23982 {0xc0, 0x46}, /* LE */
23983 {0x46, 0xc0}, /* BE */
23986 {0x00, 0xbf}, /* LE */
23987 {0xbf, 0x00} /* BE */
23990 static unsigned char const wide_thumb_noop
[2][4] =
23991 { /* Wide Thumb-2 */
23992 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
23993 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
23996 unsigned bytes
, fix
, noop_size
;
23998 const unsigned char * noop
;
23999 const unsigned char *narrow_noop
= NULL
;
24004 if (fragP
->fr_type
!= rs_align_code
)
24007 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
24008 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
24011 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
24012 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
24014 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
24016 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
24018 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
24019 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
24021 narrow_noop
= thumb_noop
[1][target_big_endian
];
24022 noop
= wide_thumb_noop
[target_big_endian
];
24025 noop
= thumb_noop
[0][target_big_endian
];
24033 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
24034 ? selected_cpu
: arm_arch_none
,
24036 [target_big_endian
];
24043 fragP
->fr_var
= noop_size
;
24045 if (bytes
& (noop_size
- 1))
24047 fix
= bytes
& (noop_size
- 1);
24049 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
24051 memset (p
, 0, fix
);
24058 if (bytes
& noop_size
)
24060 /* Insert a narrow noop. */
24061 memcpy (p
, narrow_noop
, noop_size
);
24063 bytes
-= noop_size
;
24067 /* Use wide noops for the remainder */
24071 while (bytes
>= noop_size
)
24073 memcpy (p
, noop
, noop_size
);
24075 bytes
-= noop_size
;
24079 fragP
->fr_fix
+= fix
;
24082 /* Called from md_do_align. Used to create an alignment
24083 frag in a code section. */
24086 arm_frag_align_code (int n
, int max
)
24090 /* We assume that there will never be a requirement
24091 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
24092 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
24097 _("alignments greater than %d bytes not supported in .text sections."),
24098 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
24099 as_fatal ("%s", err_msg
);
24102 p
= frag_var (rs_align_code
,
24103 MAX_MEM_FOR_RS_ALIGN_CODE
,
24105 (relax_substateT
) max
,
24112 /* Perform target specific initialisation of a frag.
24113 Note - despite the name this initialisation is not done when the frag
24114 is created, but only when its type is assigned. A frag can be created
24115 and used a long time before its type is set, so beware of assuming that
24116 this initialisation is performed first. */
24120 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
24122 /* Record whether this frag is in an ARM or a THUMB area. */
24123 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
24126 #else /* OBJ_ELF is defined. */
24128 arm_init_frag (fragS
* fragP
, int max_chars
)
24130 bfd_boolean frag_thumb_mode
;
24132 /* If the current ARM vs THUMB mode has not already
24133 been recorded into this frag then do so now. */
24134 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
24135 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
24137 /* PR 21809: Do not set a mapping state for debug sections
24138 - it just confuses other tools. */
24139 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
24142 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
24144 /* Record a mapping symbol for alignment frags. We will delete this
24145 later if the alignment ends up empty. */
24146 switch (fragP
->fr_type
)
24149 case rs_align_test
:
24151 mapping_state_2 (MAP_DATA
, max_chars
);
24153 case rs_align_code
:
24154 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
24161 /* When we change sections we need to issue a new mapping symbol. */
24164 arm_elf_change_section (void)
24166 /* Link an unlinked unwind index table section to the .text section. */
24167 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
24168 && elf_linked_to_section (now_seg
) == NULL
)
24169 elf_linked_to_section (now_seg
) = text_section
;
24173 arm_elf_section_type (const char * str
, size_t len
)
24175 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
24176 return SHT_ARM_EXIDX
;
24181 /* Code to deal with unwinding tables. */
24183 static void add_unwind_adjustsp (offsetT
);
24185 /* Generate any deferred unwind frame offset. */
24188 flush_pending_unwind (void)
24192 offset
= unwind
.pending_offset
;
24193 unwind
.pending_offset
= 0;
24195 add_unwind_adjustsp (offset
);
24198 /* Add an opcode to this list for this function. Two-byte opcodes should
24199 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
24203 add_unwind_opcode (valueT op
, int length
)
24205 /* Add any deferred stack adjustment. */
24206 if (unwind
.pending_offset
)
24207 flush_pending_unwind ();
24209 unwind
.sp_restored
= 0;
24211 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
24213 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
24214 if (unwind
.opcodes
)
24215 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
24216 unwind
.opcode_alloc
);
24218 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
24223 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
24225 unwind
.opcode_count
++;
24229 /* Add unwind opcodes to adjust the stack pointer. */
24232 add_unwind_adjustsp (offsetT offset
)
24236 if (offset
> 0x200)
24238 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
24243 /* Long form: 0xb2, uleb128. */
24244 /* This might not fit in a word so add the individual bytes,
24245 remembering the list is built in reverse order. */
24246 o
= (valueT
) ((offset
- 0x204) >> 2);
24248 add_unwind_opcode (0, 1);
24250 /* Calculate the uleb128 encoding of the offset. */
24254 bytes
[n
] = o
& 0x7f;
24260 /* Add the insn. */
24262 add_unwind_opcode (bytes
[n
- 1], 1);
24263 add_unwind_opcode (0xb2, 1);
24265 else if (offset
> 0x100)
24267 /* Two short opcodes. */
24268 add_unwind_opcode (0x3f, 1);
24269 op
= (offset
- 0x104) >> 2;
24270 add_unwind_opcode (op
, 1);
24272 else if (offset
> 0)
24274 /* Short opcode. */
24275 op
= (offset
- 4) >> 2;
24276 add_unwind_opcode (op
, 1);
24278 else if (offset
< 0)
24281 while (offset
> 0x100)
24283 add_unwind_opcode (0x7f, 1);
24286 op
= ((offset
- 4) >> 2) | 0x40;
24287 add_unwind_opcode (op
, 1);
24291 /* Finish the list of unwind opcodes for this function. */
24294 finish_unwind_opcodes (void)
24298 if (unwind
.fp_used
)
24300 /* Adjust sp as necessary. */
24301 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
24302 flush_pending_unwind ();
24304 /* After restoring sp from the frame pointer. */
24305 op
= 0x90 | unwind
.fp_reg
;
24306 add_unwind_opcode (op
, 1);
24309 flush_pending_unwind ();
24313 /* Start an exception table entry. If idx is nonzero this is an index table
24317 start_unwind_section (const segT text_seg
, int idx
)
24319 const char * text_name
;
24320 const char * prefix
;
24321 const char * prefix_once
;
24322 const char * group_name
;
24330 prefix
= ELF_STRING_ARM_unwind
;
24331 prefix_once
= ELF_STRING_ARM_unwind_once
;
24332 type
= SHT_ARM_EXIDX
;
24336 prefix
= ELF_STRING_ARM_unwind_info
;
24337 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
24338 type
= SHT_PROGBITS
;
24341 text_name
= segment_name (text_seg
);
24342 if (streq (text_name
, ".text"))
24345 if (strncmp (text_name
, ".gnu.linkonce.t.",
24346 strlen (".gnu.linkonce.t.")) == 0)
24348 prefix
= prefix_once
;
24349 text_name
+= strlen (".gnu.linkonce.t.");
24352 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
24358 /* Handle COMDAT group. */
24359 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
24361 group_name
= elf_group_name (text_seg
);
24362 if (group_name
== NULL
)
24364 as_bad (_("Group section `%s' has no group signature"),
24365 segment_name (text_seg
));
24366 ignore_rest_of_line ();
24369 flags
|= SHF_GROUP
;
24373 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
24376 /* Set the section link for index tables. */
24378 elf_linked_to_section (now_seg
) = text_seg
;
24382 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
24383 personality routine data. Returns zero, or the index table value for
24384 an inline entry. */
24387 create_unwind_entry (int have_data
)
24392 /* The current word of data. */
24394 /* The number of bytes left in this word. */
24397 finish_unwind_opcodes ();
24399 /* Remember the current text section. */
24400 unwind
.saved_seg
= now_seg
;
24401 unwind
.saved_subseg
= now_subseg
;
24403 start_unwind_section (now_seg
, 0);
24405 if (unwind
.personality_routine
== NULL
)
24407 if (unwind
.personality_index
== -2)
24410 as_bad (_("handlerdata in cantunwind frame"));
24411 return 1; /* EXIDX_CANTUNWIND. */
24414 /* Use a default personality routine if none is specified. */
24415 if (unwind
.personality_index
== -1)
24417 if (unwind
.opcode_count
> 3)
24418 unwind
.personality_index
= 1;
24420 unwind
.personality_index
= 0;
24423 /* Space for the personality routine entry. */
24424 if (unwind
.personality_index
== 0)
24426 if (unwind
.opcode_count
> 3)
24427 as_bad (_("too many unwind opcodes for personality routine 0"));
24431 /* All the data is inline in the index table. */
24434 while (unwind
.opcode_count
> 0)
24436 unwind
.opcode_count
--;
24437 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
24441 /* Pad with "finish" opcodes. */
24443 data
= (data
<< 8) | 0xb0;
24450 /* We get two opcodes "free" in the first word. */
24451 size
= unwind
.opcode_count
- 2;
24455 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
24456 if (unwind
.personality_index
!= -1)
24458 as_bad (_("attempt to recreate an unwind entry"));
24462 /* An extra byte is required for the opcode count. */
24463 size
= unwind
.opcode_count
+ 1;
24466 size
= (size
+ 3) >> 2;
24468 as_bad (_("too many unwind opcodes"));
24470 frag_align (2, 0, 0);
24471 record_alignment (now_seg
, 2);
24472 unwind
.table_entry
= expr_build_dot ();
24474 /* Allocate the table entry. */
24475 ptr
= frag_more ((size
<< 2) + 4);
24476 /* PR 13449: Zero the table entries in case some of them are not used. */
24477 memset (ptr
, 0, (size
<< 2) + 4);
24478 where
= frag_now_fix () - ((size
<< 2) + 4);
24480 switch (unwind
.personality_index
)
24483 /* ??? Should this be a PLT generating relocation? */
24484 /* Custom personality routine. */
24485 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
24486 BFD_RELOC_ARM_PREL31
);
24491 /* Set the first byte to the number of additional words. */
24492 data
= size
> 0 ? size
- 1 : 0;
24496 /* ABI defined personality routines. */
24498 /* Three opcodes bytes are packed into the first word. */
24505 /* The size and first two opcode bytes go in the first word. */
24506 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
24511 /* Should never happen. */
24515 /* Pack the opcodes into words (MSB first), reversing the list at the same
24517 while (unwind
.opcode_count
> 0)
24521 md_number_to_chars (ptr
, data
, 4);
24526 unwind
.opcode_count
--;
24528 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
24531 /* Finish off the last word. */
24534 /* Pad with "finish" opcodes. */
24536 data
= (data
<< 8) | 0xb0;
24538 md_number_to_chars (ptr
, data
, 4);
24543 /* Add an empty descriptor if there is no user-specified data. */
24544 ptr
= frag_more (4);
24545 md_number_to_chars (ptr
, 0, 4);
24552 /* Initialize the DWARF-2 unwind information for this procedure. */
24555 tc_arm_frame_initial_instructions (void)
24557 cfi_add_CFA_def_cfa (REG_SP
, 0);
24559 #endif /* OBJ_ELF */
24561 /* Convert REGNAME to a DWARF-2 register number. */
24564 tc_arm_regname_to_dw2regnum (char *regname
)
24566 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
24570 /* PR 16694: Allow VFP registers as well. */
24571 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
24575 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
24584 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
24588 exp
.X_op
= O_secrel
;
24589 exp
.X_add_symbol
= symbol
;
24590 exp
.X_add_number
= 0;
24591 emit_expr (&exp
, size
);
24595 /* MD interface: Symbol and relocation handling. */
24597 /* Return the address within the segment that a PC-relative fixup is
24598 relative to. For ARM, PC-relative fixups applied to instructions
24599 are generally relative to the location of the fixup plus 8 bytes.
24600 Thumb branches are offset by 4, and Thumb loads relative to PC
24601 require special handling. */
24604 md_pcrel_from_section (fixS
* fixP
, segT seg
)
24606 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24608 /* If this is pc-relative and we are going to emit a relocation
24609 then we just want to put out any pipeline compensation that the linker
24610 will need. Otherwise we want to use the calculated base.
24611 For WinCE we skip the bias for externals as well, since this
24612 is how the MS ARM-CE assembler behaves and we want to be compatible. */
24614 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
24615 || (arm_force_relocation (fixP
)
24617 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
24623 switch (fixP
->fx_r_type
)
24625 /* PC relative addressing on the Thumb is slightly odd as the
24626 bottom two bits of the PC are forced to zero for the
24627 calculation. This happens *after* application of the
24628 pipeline offset. However, Thumb adrl already adjusts for
24629 this, so we need not do it again. */
24630 case BFD_RELOC_ARM_THUMB_ADD
:
24633 case BFD_RELOC_ARM_THUMB_OFFSET
:
24634 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24635 case BFD_RELOC_ARM_T32_ADD_PC12
:
24636 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24637 return (base
+ 4) & ~3;
24639 /* Thumb branches are simply offset by +4. */
24640 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
24641 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
24642 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
24643 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
24644 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24645 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24646 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
24647 case BFD_RELOC_ARM_THUMB_BF17
:
24648 case BFD_RELOC_ARM_THUMB_BF19
:
24649 case BFD_RELOC_ARM_THUMB_BF13
:
24650 case BFD_RELOC_ARM_THUMB_LOOP12
:
24653 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24655 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24656 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24657 && ARM_IS_FUNC (fixP
->fx_addsy
)
24658 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24659 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24662 /* BLX is like branches above, but forces the low two bits of PC to
24664 case BFD_RELOC_THUMB_PCREL_BLX
:
24666 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24667 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24668 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24669 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24670 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24671 return (base
+ 4) & ~3;
24673 /* ARM mode branches are offset by +8. However, the Windows CE
24674 loader expects the relocation not to take this into account. */
24675 case BFD_RELOC_ARM_PCREL_BLX
:
24677 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24678 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24679 && ARM_IS_FUNC (fixP
->fx_addsy
)
24680 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24681 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24684 case BFD_RELOC_ARM_PCREL_CALL
:
24686 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24687 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24688 && THUMB_IS_FUNC (fixP
->fx_addsy
)
24689 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24690 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24693 case BFD_RELOC_ARM_PCREL_BRANCH
:
24694 case BFD_RELOC_ARM_PCREL_JUMP
:
24695 case BFD_RELOC_ARM_PLT32
:
24697 /* When handling fixups immediately, because we have already
24698 discovered the value of a symbol, or the address of the frag involved
24699 we must account for the offset by +8, as the OS loader will never see the reloc.
24700 see fixup_segment() in write.c
24701 The S_IS_EXTERNAL test handles the case of global symbols.
24702 Those need the calculated base, not just the pipe compensation the linker will need. */
24704 && fixP
->fx_addsy
!= NULL
24705 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24706 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
24714 /* ARM mode loads relative to PC are also offset by +8. Unlike
24715 branches, the Windows CE loader *does* expect the relocation
24716 to take this into account. */
24717 case BFD_RELOC_ARM_OFFSET_IMM
:
24718 case BFD_RELOC_ARM_OFFSET_IMM8
:
24719 case BFD_RELOC_ARM_HWLITERAL
:
24720 case BFD_RELOC_ARM_LITERAL
:
24721 case BFD_RELOC_ARM_CP_OFF_IMM
:
24725 /* Other PC-relative relocations are un-offset. */
24731 static bfd_boolean flag_warn_syms
= TRUE
;
24734 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
24736 /* PR 18347 - Warn if the user attempts to create a symbol with the same
24737 name as an ARM instruction. Whilst strictly speaking it is allowed, it
24738 does mean that the resulting code might be very confusing to the reader.
24739 Also this warning can be triggered if the user omits an operand before
24740 an immediate address, eg:
24744 GAS treats this as an assignment of the value of the symbol foo to a
24745 symbol LDR, and so (without this code) it will not issue any kind of
24746 warning or error message.
24748 Note - ARM instructions are case-insensitive but the strings in the hash
24749 table are all stored in lower case, so we must first ensure that name is
24751 if (flag_warn_syms
&& arm_ops_hsh
)
24753 char * nbuf
= strdup (name
);
24756 for (p
= nbuf
; *p
; p
++)
24758 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
24760 static struct hash_control
* already_warned
= NULL
;
24762 if (already_warned
== NULL
)
24763 already_warned
= hash_new ();
24764 /* Only warn about the symbol once. To keep the code
24765 simple we let hash_insert do the lookup for us. */
24766 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
24767 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
24776 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
24777 Otherwise we have no need to default values of symbols. */
24780 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
24783 if (name
[0] == '_' && name
[1] == 'G'
24784 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
24788 if (symbol_find (name
))
24789 as_bad (_("GOT already in the symbol table"));
24791 GOT_symbol
= symbol_new (name
, undefined_section
,
24792 (valueT
) 0, & zero_address_frag
);
24802 /* Subroutine of md_apply_fix. Check to see if an immediate can be
24803 computed as two separate immediate values, added together. We
24804 already know that this value cannot be computed by just one ARM
24807 static unsigned int
24808 validate_immediate_twopart (unsigned int val
,
24809 unsigned int * highpart
)
24814 for (i
= 0; i
< 32; i
+= 2)
24815 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
24821 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
24823 else if (a
& 0xff0000)
24825 if (a
& 0xff000000)
24827 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
24831 gas_assert (a
& 0xff000000);
24832 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
24835 return (a
& 0xff) | (i
<< 7);
24842 validate_offset_imm (unsigned int val
, int hwse
)
24844 if ((hwse
&& val
> 255) || val
> 4095)
24849 /* Subroutine of md_apply_fix. Do those data_ops which can take a
24850 negative immediate constant by altering the instruction. A bit of
24855 by inverting the second operand, and
24858 by negating the second operand. */
24861 negate_data_op (unsigned long * instruction
,
24862 unsigned long value
)
24865 unsigned long negated
, inverted
;
24867 negated
= encode_arm_immediate (-value
);
24868 inverted
= encode_arm_immediate (~value
);
24870 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
24873 /* First negates. */
24874 case OPCODE_SUB
: /* ADD <-> SUB */
24875 new_inst
= OPCODE_ADD
;
24880 new_inst
= OPCODE_SUB
;
24884 case OPCODE_CMP
: /* CMP <-> CMN */
24885 new_inst
= OPCODE_CMN
;
24890 new_inst
= OPCODE_CMP
;
24894 /* Now Inverted ops. */
24895 case OPCODE_MOV
: /* MOV <-> MVN */
24896 new_inst
= OPCODE_MVN
;
24901 new_inst
= OPCODE_MOV
;
24905 case OPCODE_AND
: /* AND <-> BIC */
24906 new_inst
= OPCODE_BIC
;
24911 new_inst
= OPCODE_AND
;
24915 case OPCODE_ADC
: /* ADC <-> SBC */
24916 new_inst
= OPCODE_SBC
;
24921 new_inst
= OPCODE_ADC
;
24925 /* We cannot do anything. */
24930 if (value
== (unsigned) FAIL
)
24933 *instruction
&= OPCODE_MASK
;
24934 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
24938 /* Like negate_data_op, but for Thumb-2. */
24940 static unsigned int
24941 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
24945 unsigned int negated
, inverted
;
24947 negated
= encode_thumb32_immediate (-value
);
24948 inverted
= encode_thumb32_immediate (~value
);
24950 rd
= (*instruction
>> 8) & 0xf;
24951 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
24954 /* ADD <-> SUB. Includes CMP <-> CMN. */
24955 case T2_OPCODE_SUB
:
24956 new_inst
= T2_OPCODE_ADD
;
24960 case T2_OPCODE_ADD
:
24961 new_inst
= T2_OPCODE_SUB
;
24965 /* ORR <-> ORN. Includes MOV <-> MVN. */
24966 case T2_OPCODE_ORR
:
24967 new_inst
= T2_OPCODE_ORN
;
24971 case T2_OPCODE_ORN
:
24972 new_inst
= T2_OPCODE_ORR
;
24976 /* AND <-> BIC. TST has no inverted equivalent. */
24977 case T2_OPCODE_AND
:
24978 new_inst
= T2_OPCODE_BIC
;
24985 case T2_OPCODE_BIC
:
24986 new_inst
= T2_OPCODE_AND
;
24991 case T2_OPCODE_ADC
:
24992 new_inst
= T2_OPCODE_SBC
;
24996 case T2_OPCODE_SBC
:
24997 new_inst
= T2_OPCODE_ADC
;
25001 /* We cannot do anything. */
25006 if (value
== (unsigned int)FAIL
)
25009 *instruction
&= T2_OPCODE_MASK
;
25010 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
25014 /* Read a 32-bit thumb instruction from buf. */
25016 static unsigned long
25017 get_thumb32_insn (char * buf
)
25019 unsigned long insn
;
25020 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
25021 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25026 /* We usually want to set the low bit on the address of thumb function
25027 symbols. In particular .word foo - . should have the low bit set.
25028 Generic code tries to fold the difference of two symbols to
25029 a constant. Prevent this and force a relocation when the first symbols
25030 is a thumb function. */
25033 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
25035 if (op
== O_subtract
25036 && l
->X_op
== O_symbol
25037 && r
->X_op
== O_symbol
25038 && THUMB_IS_FUNC (l
->X_add_symbol
))
25040 l
->X_op
= O_subtract
;
25041 l
->X_op_symbol
= r
->X_add_symbol
;
25042 l
->X_add_number
-= r
->X_add_number
;
25046 /* Process as normal. */
25050 /* Encode Thumb2 unconditional branches and calls. The encoding
25051 for the 2 are identical for the immediate values. */
25054 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
25056 #define T2I1I2MASK ((1 << 13) | (1 << 11))
25059 addressT S
, I1
, I2
, lo
, hi
;
25061 S
= (value
>> 24) & 0x01;
25062 I1
= (value
>> 23) & 0x01;
25063 I2
= (value
>> 22) & 0x01;
25064 hi
= (value
>> 12) & 0x3ff;
25065 lo
= (value
>> 1) & 0x7ff;
25066 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25067 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25068 newval
|= (S
<< 10) | hi
;
25069 newval2
&= ~T2I1I2MASK
;
25070 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
25071 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25072 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25076 md_apply_fix (fixS
* fixP
,
25080 offsetT value
= * valP
;
25082 unsigned int newimm
;
25083 unsigned long temp
;
25085 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
25087 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
25089 /* Note whether this will delete the relocation. */
25091 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
25094 /* On a 64-bit host, silently truncate 'value' to 32 bits for
25095 consistency with the behaviour on 32-bit hosts. Remember value
25097 value
&= 0xffffffff;
25098 value
^= 0x80000000;
25099 value
-= 0x80000000;
25102 fixP
->fx_addnumber
= value
;
25104 /* Same treatment for fixP->fx_offset. */
25105 fixP
->fx_offset
&= 0xffffffff;
25106 fixP
->fx_offset
^= 0x80000000;
25107 fixP
->fx_offset
-= 0x80000000;
25109 switch (fixP
->fx_r_type
)
25111 case BFD_RELOC_NONE
:
25112 /* This will need to go in the object file. */
25116 case BFD_RELOC_ARM_IMMEDIATE
:
25117 /* We claim that this fixup has been processed here,
25118 even if in fact we generate an error because we do
25119 not have a reloc for it, so tc_gen_reloc will reject it. */
25122 if (fixP
->fx_addsy
)
25124 const char *msg
= 0;
25126 if (! S_IS_DEFINED (fixP
->fx_addsy
))
25127 msg
= _("undefined symbol %s used as an immediate value");
25128 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
25129 msg
= _("symbol %s is in a different section");
25130 else if (S_IS_WEAK (fixP
->fx_addsy
))
25131 msg
= _("symbol %s is weak and may be overridden later");
25135 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25136 msg
, S_GET_NAME (fixP
->fx_addsy
));
25141 temp
= md_chars_to_number (buf
, INSN_SIZE
);
25143 /* If the offset is negative, we should use encoding A2 for ADR. */
25144 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
25145 newimm
= negate_data_op (&temp
, value
);
25148 newimm
= encode_arm_immediate (value
);
25150 /* If the instruction will fail, see if we can fix things up by
25151 changing the opcode. */
25152 if (newimm
== (unsigned int) FAIL
)
25153 newimm
= negate_data_op (&temp
, value
);
25154 /* MOV accepts both ARM modified immediate (A1 encoding) and
25155 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
25156 When disassembling, MOV is preferred when there is no encoding
25158 if (newimm
== (unsigned int) FAIL
25159 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
25160 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
25161 && !((temp
>> SBIT_SHIFT
) & 0x1)
25162 && value
>= 0 && value
<= 0xffff)
25164 /* Clear bits[23:20] to change encoding from A1 to A2. */
25165 temp
&= 0xff0fffff;
25166 /* Encoding high 4bits imm. Code below will encode the remaining
25168 temp
|= (value
& 0x0000f000) << 4;
25169 newimm
= value
& 0x00000fff;
25173 if (newimm
== (unsigned int) FAIL
)
25175 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25176 _("invalid constant (%lx) after fixup"),
25177 (unsigned long) value
);
25181 newimm
|= (temp
& 0xfffff000);
25182 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
25185 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
25187 unsigned int highpart
= 0;
25188 unsigned int newinsn
= 0xe1a00000; /* nop. */
25190 if (fixP
->fx_addsy
)
25192 const char *msg
= 0;
25194 if (! S_IS_DEFINED (fixP
->fx_addsy
))
25195 msg
= _("undefined symbol %s used as an immediate value");
25196 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
25197 msg
= _("symbol %s is in a different section");
25198 else if (S_IS_WEAK (fixP
->fx_addsy
))
25199 msg
= _("symbol %s is weak and may be overridden later");
25203 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25204 msg
, S_GET_NAME (fixP
->fx_addsy
));
25209 newimm
= encode_arm_immediate (value
);
25210 temp
= md_chars_to_number (buf
, INSN_SIZE
);
25212 /* If the instruction will fail, see if we can fix things up by
25213 changing the opcode. */
25214 if (newimm
== (unsigned int) FAIL
25215 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
25217 /* No ? OK - try using two ADD instructions to generate
25219 newimm
= validate_immediate_twopart (value
, & highpart
);
25221 /* Yes - then make sure that the second instruction is
25223 if (newimm
!= (unsigned int) FAIL
)
25225 /* Still No ? Try using a negated value. */
25226 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
25227 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
25228 /* Otherwise - give up. */
25231 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25232 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
25237 /* Replace the first operand in the 2nd instruction (which
25238 is the PC) with the destination register. We have
25239 already added in the PC in the first instruction and we
25240 do not want to do it again. */
25241 newinsn
&= ~ 0xf0000;
25242 newinsn
|= ((newinsn
& 0x0f000) << 4);
25245 newimm
|= (temp
& 0xfffff000);
25246 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
25248 highpart
|= (newinsn
& 0xfffff000);
25249 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
25253 case BFD_RELOC_ARM_OFFSET_IMM
:
25254 if (!fixP
->fx_done
&& seg
->use_rela_p
)
25256 /* Fall through. */
25258 case BFD_RELOC_ARM_LITERAL
:
25264 if (validate_offset_imm (value
, 0) == FAIL
)
25266 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
25267 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25268 _("invalid literal constant: pool needs to be closer"));
25270 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25271 _("bad immediate value for offset (%ld)"),
25276 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25278 newval
&= 0xfffff000;
25281 newval
&= 0xff7ff000;
25282 newval
|= value
| (sign
? INDEX_UP
: 0);
25284 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25287 case BFD_RELOC_ARM_OFFSET_IMM8
:
25288 case BFD_RELOC_ARM_HWLITERAL
:
25294 if (validate_offset_imm (value
, 1) == FAIL
)
25296 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
25297 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25298 _("invalid literal constant: pool needs to be closer"));
25300 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25301 _("bad immediate value for 8-bit offset (%ld)"),
25306 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25308 newval
&= 0xfffff0f0;
25311 newval
&= 0xff7ff0f0;
25312 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
25314 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25317 case BFD_RELOC_ARM_T32_OFFSET_U8
:
25318 if (value
< 0 || value
> 1020 || value
% 4 != 0)
25319 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25320 _("bad immediate value for offset (%ld)"), (long) value
);
25323 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
25325 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
25328 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
25329 /* This is a complicated relocation used for all varieties of Thumb32
25330 load/store instruction with immediate offset:
25332 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
25333 *4, optional writeback(W)
25334 (doubleword load/store)
25336 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
25337 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
25338 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
25339 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
25340 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
25342 Uppercase letters indicate bits that are already encoded at
25343 this point. Lowercase letters are our problem. For the
25344 second block of instructions, the secondary opcode nybble
25345 (bits 8..11) is present, and bit 23 is zero, even if this is
25346 a PC-relative operation. */
25347 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25349 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
25351 if ((newval
& 0xf0000000) == 0xe0000000)
25353 /* Doubleword load/store: 8-bit offset, scaled by 4. */
25355 newval
|= (1 << 23);
25358 if (value
% 4 != 0)
25360 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25361 _("offset not a multiple of 4"));
25367 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25368 _("offset out of range"));
25373 else if ((newval
& 0x000f0000) == 0x000f0000)
25375 /* PC-relative, 12-bit offset. */
25377 newval
|= (1 << 23);
25382 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25383 _("offset out of range"));
25388 else if ((newval
& 0x00000100) == 0x00000100)
25390 /* Writeback: 8-bit, +/- offset. */
25392 newval
|= (1 << 9);
25397 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25398 _("offset out of range"));
25403 else if ((newval
& 0x00000f00) == 0x00000e00)
25405 /* T-instruction: positive 8-bit offset. */
25406 if (value
< 0 || value
> 0xff)
25408 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25409 _("offset out of range"));
25417 /* Positive 12-bit or negative 8-bit offset. */
25421 newval
|= (1 << 23);
25431 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25432 _("offset out of range"));
25439 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
25440 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
25443 case BFD_RELOC_ARM_SHIFT_IMM
:
25444 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25445 if (((unsigned long) value
) > 32
25447 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
25449 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25450 _("shift expression is too large"));
25455 /* Shifts of zero must be done as lsl. */
25457 else if (value
== 32)
25459 newval
&= 0xfffff07f;
25460 newval
|= (value
& 0x1f) << 7;
25461 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25464 case BFD_RELOC_ARM_T32_IMMEDIATE
:
25465 case BFD_RELOC_ARM_T32_ADD_IMM
:
25466 case BFD_RELOC_ARM_T32_IMM12
:
25467 case BFD_RELOC_ARM_T32_ADD_PC12
:
25468 /* We claim that this fixup has been processed here,
25469 even if in fact we generate an error because we do
25470 not have a reloc for it, so tc_gen_reloc will reject it. */
25474 && ! S_IS_DEFINED (fixP
->fx_addsy
))
25476 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25477 _("undefined symbol %s used as an immediate value"),
25478 S_GET_NAME (fixP
->fx_addsy
));
25482 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25484 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
25487 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25488 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
25489 Thumb2 modified immediate encoding (T2). */
25490 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
25491 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
25493 newimm
= encode_thumb32_immediate (value
);
25494 if (newimm
== (unsigned int) FAIL
)
25495 newimm
= thumb32_negate_data_op (&newval
, value
);
25497 if (newimm
== (unsigned int) FAIL
)
25499 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
25501 /* Turn add/sum into addw/subw. */
25502 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
25503 newval
= (newval
& 0xfeffffff) | 0x02000000;
25504 /* No flat 12-bit imm encoding for addsw/subsw. */
25505 if ((newval
& 0x00100000) == 0)
25507 /* 12 bit immediate for addw/subw. */
25511 newval
^= 0x00a00000;
25514 newimm
= (unsigned int) FAIL
;
25521 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
25522 UINT16 (T3 encoding), MOVW only accepts UINT16. When
25523 disassembling, MOV is preferred when there is no encoding
25525 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
25526 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
25527 but with the Rn field [19:16] set to 1111. */
25528 && (((newval
>> 16) & 0xf) == 0xf)
25529 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
25530 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
25531 && value
>= 0 && value
<= 0xffff)
25533 /* Toggle bit[25] to change encoding from T2 to T3. */
25535 /* Clear bits[19:16]. */
25536 newval
&= 0xfff0ffff;
25537 /* Encoding high 4bits imm. Code below will encode the
25538 remaining low 12bits. */
25539 newval
|= (value
& 0x0000f000) << 4;
25540 newimm
= value
& 0x00000fff;
25545 if (newimm
== (unsigned int)FAIL
)
25547 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25548 _("invalid constant (%lx) after fixup"),
25549 (unsigned long) value
);
25553 newval
|= (newimm
& 0x800) << 15;
25554 newval
|= (newimm
& 0x700) << 4;
25555 newval
|= (newimm
& 0x0ff);
25557 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
25558 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
25561 case BFD_RELOC_ARM_SMC
:
25562 if (((unsigned long) value
) > 0xffff)
25563 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25564 _("invalid smc expression"));
25565 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25566 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25567 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25570 case BFD_RELOC_ARM_HVC
:
25571 if (((unsigned long) value
) > 0xffff)
25572 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25573 _("invalid hvc expression"));
25574 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25575 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
25576 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25579 case BFD_RELOC_ARM_SWI
:
25580 if (fixP
->tc_fix_data
!= 0)
25582 if (((unsigned long) value
) > 0xff)
25583 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25584 _("invalid swi expression"));
25585 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25587 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25591 if (((unsigned long) value
) > 0x00ffffff)
25592 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25593 _("invalid swi expression"));
25594 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25596 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25600 case BFD_RELOC_ARM_MULTI
:
25601 if (((unsigned long) value
) > 0xffff)
25602 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25603 _("invalid expression in load/store multiple"));
25604 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
25605 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25609 case BFD_RELOC_ARM_PCREL_CALL
:
25611 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25613 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25614 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25615 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25616 /* Flip the bl to blx. This is a simple flip
25617 bit here because we generate PCREL_CALL for
25618 unconditional bls. */
25620 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25621 newval
= newval
| 0x10000000;
25622 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25628 goto arm_branch_common
;
25630 case BFD_RELOC_ARM_PCREL_JUMP
:
25631 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25633 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25634 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25635 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25637 /* This would map to a bl<cond>, b<cond>,
25638 b<always> to a Thumb function. We
25639 need to force a relocation for this particular
25641 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25644 /* Fall through. */
25646 case BFD_RELOC_ARM_PLT32
:
25648 case BFD_RELOC_ARM_PCREL_BRANCH
:
25650 goto arm_branch_common
;
25652 case BFD_RELOC_ARM_PCREL_BLX
:
25655 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
25657 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25658 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25659 && ARM_IS_FUNC (fixP
->fx_addsy
))
25661 /* Flip the blx to a bl and warn. */
25662 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25663 newval
= 0xeb000000;
25664 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25665 _("blx to '%s' an ARM ISA state function changed to bl"),
25667 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25673 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25674 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
25678 /* We are going to store value (shifted right by two) in the
25679 instruction, in a 24 bit, signed field. Bits 26 through 32 either
25680 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
25683 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25684 _("misaligned branch destination"));
25685 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
25686 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
25687 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25689 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25691 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25692 newval
|= (value
>> 2) & 0x00ffffff;
25693 /* Set the H bit on BLX instructions. */
25697 newval
|= 0x01000000;
25699 newval
&= ~0x01000000;
25701 md_number_to_chars (buf
, newval
, INSN_SIZE
);
25705 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
25706 /* CBZ can only branch forward. */
25708 /* Attempts to use CBZ to branch to the next instruction
25709 (which, strictly speaking, are prohibited) will be turned into
25712 FIXME: It may be better to remove the instruction completely and
25713 perform relaxation. */
25716 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25717 newval
= 0xbf00; /* NOP encoding T1 */
25718 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25723 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25725 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25727 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25728 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
25729 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25734 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
25735 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
25736 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25738 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25740 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25741 newval
|= (value
& 0x1ff) >> 1;
25742 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25746 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
25747 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
25748 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25750 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25752 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25753 newval
|= (value
& 0xfff) >> 1;
25754 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25758 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25760 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25761 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25762 && ARM_IS_FUNC (fixP
->fx_addsy
)
25763 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25765 /* Force a relocation for a branch 20 bits wide. */
25768 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
25769 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25770 _("conditional branch out of range"));
25772 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25775 addressT S
, J1
, J2
, lo
, hi
;
25777 S
= (value
& 0x00100000) >> 20;
25778 J2
= (value
& 0x00080000) >> 19;
25779 J1
= (value
& 0x00040000) >> 18;
25780 hi
= (value
& 0x0003f000) >> 12;
25781 lo
= (value
& 0x00000ffe) >> 1;
25783 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25784 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25785 newval
|= (S
<< 10) | hi
;
25786 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
25787 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25788 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25792 case BFD_RELOC_THUMB_PCREL_BLX
:
25793 /* If there is a blx from a thumb state function to
25794 another thumb function flip this to a bl and warn
25798 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25799 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25800 && THUMB_IS_FUNC (fixP
->fx_addsy
))
25802 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
25803 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
25804 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
25806 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25807 newval
= newval
| 0x1000;
25808 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25809 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25814 goto thumb_bl_common
;
25816 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25817 /* A bl from Thumb state ISA to an internal ARM state function
25818 is converted to a blx. */
25820 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25821 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25822 && ARM_IS_FUNC (fixP
->fx_addsy
)
25823 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
25825 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25826 newval
= newval
& ~0x1000;
25827 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
25828 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
25834 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25835 /* For a BLX instruction, make sure that the relocation is rounded up
25836 to a word boundary. This follows the semantics of the instruction
25837 which specifies that bit 1 of the target address will come from bit
25838 1 of the base address. */
25839 value
= (value
+ 3) & ~ 3;
25842 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
25843 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
25844 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25847 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
25849 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
25850 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25851 else if ((value
& ~0x1ffffff)
25852 && ((value
& ~0x1ffffff) != ~0x1ffffff))
25853 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25854 _("Thumb2 branch out of range"));
25857 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25858 encode_thumb2_b_bl_offset (buf
, value
);
25862 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25863 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
25864 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
25866 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25867 encode_thumb2_b_bl_offset (buf
, value
);
25872 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25877 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25878 md_number_to_chars (buf
, value
, 2);
25882 case BFD_RELOC_ARM_TLS_CALL
:
25883 case BFD_RELOC_ARM_THM_TLS_CALL
:
25884 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25885 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25886 case BFD_RELOC_ARM_TLS_GOTDESC
:
25887 case BFD_RELOC_ARM_TLS_GD32
:
25888 case BFD_RELOC_ARM_TLS_LE32
:
25889 case BFD_RELOC_ARM_TLS_IE32
:
25890 case BFD_RELOC_ARM_TLS_LDM32
:
25891 case BFD_RELOC_ARM_TLS_LDO32
:
25892 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25895 /* Same handling as above, but with the arm_fdpic guard. */
25896 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25897 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25898 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25901 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
25905 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25906 _("Relocation supported only in FDPIC mode"));
25910 case BFD_RELOC_ARM_GOT32
:
25911 case BFD_RELOC_ARM_GOTOFF
:
25914 case BFD_RELOC_ARM_GOT_PREL
:
25915 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25916 md_number_to_chars (buf
, value
, 4);
25919 case BFD_RELOC_ARM_TARGET2
:
25920 /* TARGET2 is not partial-inplace, so we need to write the
25921 addend here for REL targets, because it won't be written out
25922 during reloc processing later. */
25923 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25924 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
25927 /* Relocations for FDPIC. */
25928 case BFD_RELOC_ARM_GOTFUNCDESC
:
25929 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25930 case BFD_RELOC_ARM_FUNCDESC
:
25933 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25934 md_number_to_chars (buf
, 0, 4);
25938 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25939 _("Relocation supported only in FDPIC mode"));
25944 case BFD_RELOC_RVA
:
25946 case BFD_RELOC_ARM_TARGET1
:
25947 case BFD_RELOC_ARM_ROSEGREL32
:
25948 case BFD_RELOC_ARM_SBREL32
:
25949 case BFD_RELOC_32_PCREL
:
25951 case BFD_RELOC_32_SECREL
:
25953 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25955 /* For WinCE we only do this for pcrel fixups. */
25956 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
25958 md_number_to_chars (buf
, value
, 4);
25962 case BFD_RELOC_ARM_PREL31
:
25963 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25965 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
25966 if ((value
^ (value
>> 1)) & 0x40000000)
25968 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25969 _("rel31 relocation overflow"));
25971 newval
|= value
& 0x7fffffff;
25972 md_number_to_chars (buf
, newval
, 4);
25977 case BFD_RELOC_ARM_CP_OFF_IMM
:
25978 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
25979 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
25980 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
25981 newval
= md_chars_to_number (buf
, INSN_SIZE
);
25983 newval
= get_thumb32_insn (buf
);
25984 if ((newval
& 0x0f200f00) == 0x0d000900)
25986 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
25987 has permitted values that are multiples of 2, in the range 0
25989 if (value
< -510 || value
> 510 || (value
& 1))
25990 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25991 _("co-processor offset out of range"));
25993 else if ((newval
& 0xfe001f80) == 0xec000f80)
25995 if (value
< -511 || value
> 512 || (value
& 3))
25996 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25997 _("co-processor offset out of range"));
25999 else if (value
< -1023 || value
> 1023 || (value
& 3))
26000 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26001 _("co-processor offset out of range"));
26006 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
26007 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
26008 newval
= md_chars_to_number (buf
, INSN_SIZE
);
26010 newval
= get_thumb32_insn (buf
);
26013 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
26014 newval
&= 0xffffff80;
26016 newval
&= 0xffffff00;
26020 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
26021 newval
&= 0xff7fff80;
26023 newval
&= 0xff7fff00;
26024 if ((newval
& 0x0f200f00) == 0x0d000900)
26026 /* This is a fp16 vstr/vldr.
26028 It requires the immediate offset in the instruction is shifted
26029 left by 1 to be a half-word offset.
26031 Here, left shift by 1 first, and later right shift by 2
26032 should get the right offset. */
26035 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
26037 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
26038 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
26039 md_number_to_chars (buf
, newval
, INSN_SIZE
);
26041 put_thumb32_insn (buf
, newval
);
26044 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
26045 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
26046 if (value
< -255 || value
> 255)
26047 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26048 _("co-processor offset out of range"));
26050 goto cp_off_common
;
26052 case BFD_RELOC_ARM_THUMB_OFFSET
:
26053 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26054 /* Exactly what ranges, and where the offset is inserted depends
26055 on the type of instruction, we can establish this from the
26057 switch (newval
>> 12)
26059 case 4: /* PC load. */
26060 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
26061 forced to zero for these loads; md_pcrel_from has already
26062 compensated for this. */
26064 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26065 _("invalid offset, target not word aligned (0x%08lX)"),
26066 (((unsigned long) fixP
->fx_frag
->fr_address
26067 + (unsigned long) fixP
->fx_where
) & ~3)
26068 + (unsigned long) value
);
26070 if (value
& ~0x3fc)
26071 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26072 _("invalid offset, value too big (0x%08lX)"),
26075 newval
|= value
>> 2;
26078 case 9: /* SP load/store. */
26079 if (value
& ~0x3fc)
26080 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26081 _("invalid offset, value too big (0x%08lX)"),
26083 newval
|= value
>> 2;
26086 case 6: /* Word load/store. */
26088 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26089 _("invalid offset, value too big (0x%08lX)"),
26091 newval
|= value
<< 4; /* 6 - 2. */
26094 case 7: /* Byte load/store. */
26096 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26097 _("invalid offset, value too big (0x%08lX)"),
26099 newval
|= value
<< 6;
26102 case 8: /* Halfword load/store. */
26104 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26105 _("invalid offset, value too big (0x%08lX)"),
26107 newval
|= value
<< 5; /* 6 - 1. */
26111 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26112 "Unable to process relocation for thumb opcode: %lx",
26113 (unsigned long) newval
);
26116 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26119 case BFD_RELOC_ARM_THUMB_ADD
:
26120 /* This is a complicated relocation, since we use it for all of
26121 the following immediate relocations:
26125 9bit ADD/SUB SP word-aligned
26126 10bit ADD PC/SP word-aligned
26128 The type of instruction being processed is encoded in the
26135 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26137 int rd
= (newval
>> 4) & 0xf;
26138 int rs
= newval
& 0xf;
26139 int subtract
= !!(newval
& 0x8000);
26141 /* Check for HI regs, only very restricted cases allowed:
26142 Adjusting SP, and using PC or SP to get an address. */
26143 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
26144 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
26145 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26146 _("invalid Hi register with immediate"));
26148 /* If value is negative, choose the opposite instruction. */
26152 subtract
= !subtract
;
26154 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26155 _("immediate value out of range"));
26160 if (value
& ~0x1fc)
26161 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26162 _("invalid immediate for stack address calculation"));
26163 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
26164 newval
|= value
>> 2;
26166 else if (rs
== REG_PC
|| rs
== REG_SP
)
26168 /* PR gas/18541. If the addition is for a defined symbol
26169 within range of an ADR instruction then accept it. */
26172 && fixP
->fx_addsy
!= NULL
)
26176 if (! S_IS_DEFINED (fixP
->fx_addsy
)
26177 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
26178 || S_IS_WEAK (fixP
->fx_addsy
))
26180 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26181 _("address calculation needs a strongly defined nearby symbol"));
26185 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
26187 /* Round up to the next 4-byte boundary. */
26192 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
26196 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26197 _("symbol too far away"));
26207 if (subtract
|| value
& ~0x3fc)
26208 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26209 _("invalid immediate for address calculation (value = 0x%08lX)"),
26210 (unsigned long) (subtract
? - value
: value
));
26211 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
26213 newval
|= value
>> 2;
26218 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26219 _("immediate value out of range"));
26220 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
26221 newval
|= (rd
<< 8) | value
;
26226 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26227 _("immediate value out of range"));
26228 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
26229 newval
|= rd
| (rs
<< 3) | (value
<< 6);
26232 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26235 case BFD_RELOC_ARM_THUMB_IMM
:
26236 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26237 if (value
< 0 || value
> 255)
26238 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26239 _("invalid immediate: %ld is out of range"),
26242 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26245 case BFD_RELOC_ARM_THUMB_SHIFT
:
26246 /* 5bit shift value (0..32). LSL cannot take 32. */
26247 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
26248 temp
= newval
& 0xf800;
26249 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
26250 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26251 _("invalid shift value: %ld"), (long) value
);
26252 /* Shifts of zero must be encoded as LSL. */
26254 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
26255 /* Shifts of 32 are encoded as zero. */
26256 else if (value
== 32)
26258 newval
|= value
<< 6;
26259 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26262 case BFD_RELOC_VTABLE_INHERIT
:
26263 case BFD_RELOC_VTABLE_ENTRY
:
26267 case BFD_RELOC_ARM_MOVW
:
26268 case BFD_RELOC_ARM_MOVT
:
26269 case BFD_RELOC_ARM_THUMB_MOVW
:
26270 case BFD_RELOC_ARM_THUMB_MOVT
:
26271 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26273 /* REL format relocations are limited to a 16-bit addend. */
26274 if (!fixP
->fx_done
)
26276 if (value
< -0x8000 || value
> 0x7fff)
26277 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26278 _("offset out of range"));
26280 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
26281 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
26286 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
26287 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
26289 newval
= get_thumb32_insn (buf
);
26290 newval
&= 0xfbf08f00;
26291 newval
|= (value
& 0xf000) << 4;
26292 newval
|= (value
& 0x0800) << 15;
26293 newval
|= (value
& 0x0700) << 4;
26294 newval
|= (value
& 0x00ff);
26295 put_thumb32_insn (buf
, newval
);
26299 newval
= md_chars_to_number (buf
, 4);
26300 newval
&= 0xfff0f000;
26301 newval
|= value
& 0x0fff;
26302 newval
|= (value
& 0xf000) << 4;
26303 md_number_to_chars (buf
, newval
, 4);
26308 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26309 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26310 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26311 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26312 gas_assert (!fixP
->fx_done
);
26315 bfd_boolean is_mov
;
26316 bfd_vma encoded_addend
= value
;
26318 /* Check that addend can be encoded in instruction. */
26319 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
26320 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26321 _("the offset 0x%08lX is not representable"),
26322 (unsigned long) encoded_addend
);
26324 /* Extract the instruction. */
26325 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
26326 is_mov
= (insn
& 0xf800) == 0x2000;
26331 if (!seg
->use_rela_p
)
26332 insn
|= encoded_addend
;
26338 /* Extract the instruction. */
26339 /* Encoding is the following
26344 /* The following conditions must be true :
26349 rd
= (insn
>> 4) & 0xf;
26351 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
26352 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26353 _("Unable to process relocation for thumb opcode: %lx"),
26354 (unsigned long) insn
);
26356 /* Encode as ADD immediate8 thumb 1 code. */
26357 insn
= 0x3000 | (rd
<< 8);
26359 /* Place the encoded addend into the first 8 bits of the
26361 if (!seg
->use_rela_p
)
26362 insn
|= encoded_addend
;
26365 /* Update the instruction. */
26366 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
26370 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26371 case BFD_RELOC_ARM_ALU_PC_G0
:
26372 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26373 case BFD_RELOC_ARM_ALU_PC_G1
:
26374 case BFD_RELOC_ARM_ALU_PC_G2
:
26375 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26376 case BFD_RELOC_ARM_ALU_SB_G0
:
26377 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26378 case BFD_RELOC_ARM_ALU_SB_G1
:
26379 case BFD_RELOC_ARM_ALU_SB_G2
:
26380 gas_assert (!fixP
->fx_done
);
26381 if (!seg
->use_rela_p
)
26384 bfd_vma encoded_addend
;
26385 bfd_vma addend_abs
= llabs (value
);
26387 /* Check that the absolute value of the addend can be
26388 expressed as an 8-bit constant plus a rotation. */
26389 encoded_addend
= encode_arm_immediate (addend_abs
);
26390 if (encoded_addend
== (unsigned int) FAIL
)
26391 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26392 _("the offset 0x%08lX is not representable"),
26393 (unsigned long) addend_abs
);
26395 /* Extract the instruction. */
26396 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26398 /* If the addend is positive, use an ADD instruction.
26399 Otherwise use a SUB. Take care not to destroy the S bit. */
26400 insn
&= 0xff1fffff;
26406 /* Place the encoded addend into the first 12 bits of the
26408 insn
&= 0xfffff000;
26409 insn
|= encoded_addend
;
26411 /* Update the instruction. */
26412 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26416 case BFD_RELOC_ARM_LDR_PC_G0
:
26417 case BFD_RELOC_ARM_LDR_PC_G1
:
26418 case BFD_RELOC_ARM_LDR_PC_G2
:
26419 case BFD_RELOC_ARM_LDR_SB_G0
:
26420 case BFD_RELOC_ARM_LDR_SB_G1
:
26421 case BFD_RELOC_ARM_LDR_SB_G2
:
26422 gas_assert (!fixP
->fx_done
);
26423 if (!seg
->use_rela_p
)
26426 bfd_vma addend_abs
= llabs (value
);
26428 /* Check that the absolute value of the addend can be
26429 encoded in 12 bits. */
26430 if (addend_abs
>= 0x1000)
26431 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26432 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
26433 (unsigned long) addend_abs
);
26435 /* Extract the instruction. */
26436 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26438 /* If the addend is negative, clear bit 23 of the instruction.
26439 Otherwise set it. */
26441 insn
&= ~(1 << 23);
26445 /* Place the absolute value of the addend into the first 12 bits
26446 of the instruction. */
26447 insn
&= 0xfffff000;
26448 insn
|= addend_abs
;
26450 /* Update the instruction. */
26451 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26455 case BFD_RELOC_ARM_LDRS_PC_G0
:
26456 case BFD_RELOC_ARM_LDRS_PC_G1
:
26457 case BFD_RELOC_ARM_LDRS_PC_G2
:
26458 case BFD_RELOC_ARM_LDRS_SB_G0
:
26459 case BFD_RELOC_ARM_LDRS_SB_G1
:
26460 case BFD_RELOC_ARM_LDRS_SB_G2
:
26461 gas_assert (!fixP
->fx_done
);
26462 if (!seg
->use_rela_p
)
26465 bfd_vma addend_abs
= llabs (value
);
26467 /* Check that the absolute value of the addend can be
26468 encoded in 8 bits. */
26469 if (addend_abs
>= 0x100)
26470 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26471 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
26472 (unsigned long) addend_abs
);
26474 /* Extract the instruction. */
26475 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26477 /* If the addend is negative, clear bit 23 of the instruction.
26478 Otherwise set it. */
26480 insn
&= ~(1 << 23);
26484 /* Place the first four bits of the absolute value of the addend
26485 into the first 4 bits of the instruction, and the remaining
26486 four into bits 8 .. 11. */
26487 insn
&= 0xfffff0f0;
26488 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
26490 /* Update the instruction. */
26491 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26495 case BFD_RELOC_ARM_LDC_PC_G0
:
26496 case BFD_RELOC_ARM_LDC_PC_G1
:
26497 case BFD_RELOC_ARM_LDC_PC_G2
:
26498 case BFD_RELOC_ARM_LDC_SB_G0
:
26499 case BFD_RELOC_ARM_LDC_SB_G1
:
26500 case BFD_RELOC_ARM_LDC_SB_G2
:
26501 gas_assert (!fixP
->fx_done
);
26502 if (!seg
->use_rela_p
)
26505 bfd_vma addend_abs
= llabs (value
);
26507 /* Check that the absolute value of the addend is a multiple of
26508 four and, when divided by four, fits in 8 bits. */
26509 if (addend_abs
& 0x3)
26510 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26511 _("bad offset 0x%08lX (must be word-aligned)"),
26512 (unsigned long) addend_abs
);
26514 if ((addend_abs
>> 2) > 0xff)
26515 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26516 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
26517 (unsigned long) addend_abs
);
26519 /* Extract the instruction. */
26520 insn
= md_chars_to_number (buf
, INSN_SIZE
);
26522 /* If the addend is negative, clear bit 23 of the instruction.
26523 Otherwise set it. */
26525 insn
&= ~(1 << 23);
26529 /* Place the addend (divided by four) into the first eight
26530 bits of the instruction. */
26531 insn
&= 0xfffffff0;
26532 insn
|= addend_abs
>> 2;
26534 /* Update the instruction. */
26535 md_number_to_chars (buf
, insn
, INSN_SIZE
);
26539 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26541 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26542 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26543 && ARM_IS_FUNC (fixP
->fx_addsy
)
26544 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26546 /* Force a relocation for a branch 5 bits wide. */
26549 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
26550 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26553 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26555 addressT boff
= value
>> 1;
26557 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26558 newval
|= (boff
<< 7);
26559 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26563 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26565 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26566 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26567 && ARM_IS_FUNC (fixP
->fx_addsy
)
26568 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26572 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
26573 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26574 _("branch out of range"));
26576 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26578 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26580 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
26581 addressT diff
= value
- boff
;
26585 newval
|= 1 << 1; /* T bit. */
26587 else if (diff
!= 2)
26589 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26590 _("out of range label-relative fixup value"));
26592 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26596 case BFD_RELOC_ARM_THUMB_BF17
:
26598 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26599 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26600 && ARM_IS_FUNC (fixP
->fx_addsy
)
26601 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26603 /* Force a relocation for a branch 17 bits wide. */
26607 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
26608 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26611 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26614 addressT immA
, immB
, immC
;
26616 immA
= (value
& 0x0001f000) >> 12;
26617 immB
= (value
& 0x00000ffc) >> 2;
26618 immC
= (value
& 0x00000002) >> 1;
26620 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26621 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26623 newval2
|= (immC
<< 11) | (immB
<< 1);
26624 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26625 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26629 case BFD_RELOC_ARM_THUMB_BF19
:
26631 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26632 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26633 && ARM_IS_FUNC (fixP
->fx_addsy
)
26634 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26636 /* Force a relocation for a branch 19 bits wide. */
26640 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
26641 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26644 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26647 addressT immA
, immB
, immC
;
26649 immA
= (value
& 0x0007f000) >> 12;
26650 immB
= (value
& 0x00000ffc) >> 2;
26651 immC
= (value
& 0x00000002) >> 1;
26653 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26654 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26656 newval2
|= (immC
<< 11) | (immB
<< 1);
26657 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26658 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26662 case BFD_RELOC_ARM_THUMB_BF13
:
26664 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26665 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26666 && ARM_IS_FUNC (fixP
->fx_addsy
)
26667 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26669 /* Force a relocation for a branch 13 bits wide. */
26673 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
26674 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26677 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26680 addressT immA
, immB
, immC
;
26682 immA
= (value
& 0x00001000) >> 12;
26683 immB
= (value
& 0x00000ffc) >> 2;
26684 immC
= (value
& 0x00000002) >> 1;
26686 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
26687 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26689 newval2
|= (immC
<< 11) | (immB
<< 1);
26690 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
26691 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
26695 case BFD_RELOC_ARM_THUMB_LOOP12
:
26697 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
26698 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
26699 && ARM_IS_FUNC (fixP
->fx_addsy
)
26700 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
26702 /* Force a relocation for a branch 12 bits wide. */
26706 bfd_vma insn
= get_thumb32_insn (buf
);
26707 /* le lr, <label> or le <label> */
26708 if (((insn
& 0xffffffff) == 0xf00fc001)
26709 || ((insn
& 0xffffffff) == 0xf02fc001))
26712 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
26713 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26715 if (fixP
->fx_done
|| !seg
->use_rela_p
)
26717 addressT imml
, immh
;
26719 immh
= (value
& 0x00000ffc) >> 2;
26720 imml
= (value
& 0x00000002) >> 1;
26722 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
26723 newval
|= (imml
<< 11) | (immh
<< 1);
26724 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
26728 case BFD_RELOC_ARM_V4BX
:
26729 /* This will need to go in the object file. */
26733 case BFD_RELOC_UNUSED
:
26735 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
26736 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
26740 /* Translate internal representation of relocation info to BFD target
26744 tc_gen_reloc (asection
*section
, fixS
*fixp
)
26747 bfd_reloc_code_real_type code
;
26749 reloc
= XNEW (arelent
);
26751 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
26752 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
26753 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
26755 if (fixp
->fx_pcrel
)
26757 if (section
->use_rela_p
)
26758 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
26760 fixp
->fx_offset
= reloc
->address
;
26762 reloc
->addend
= fixp
->fx_offset
;
26764 switch (fixp
->fx_r_type
)
26767 if (fixp
->fx_pcrel
)
26769 code
= BFD_RELOC_8_PCREL
;
26772 /* Fall through. */
26775 if (fixp
->fx_pcrel
)
26777 code
= BFD_RELOC_16_PCREL
;
26780 /* Fall through. */
26783 if (fixp
->fx_pcrel
)
26785 code
= BFD_RELOC_32_PCREL
;
26788 /* Fall through. */
26790 case BFD_RELOC_ARM_MOVW
:
26791 if (fixp
->fx_pcrel
)
26793 code
= BFD_RELOC_ARM_MOVW_PCREL
;
26796 /* Fall through. */
26798 case BFD_RELOC_ARM_MOVT
:
26799 if (fixp
->fx_pcrel
)
26801 code
= BFD_RELOC_ARM_MOVT_PCREL
;
26804 /* Fall through. */
26806 case BFD_RELOC_ARM_THUMB_MOVW
:
26807 if (fixp
->fx_pcrel
)
26809 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
26812 /* Fall through. */
26814 case BFD_RELOC_ARM_THUMB_MOVT
:
26815 if (fixp
->fx_pcrel
)
26817 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
26820 /* Fall through. */
26822 case BFD_RELOC_NONE
:
26823 case BFD_RELOC_ARM_PCREL_BRANCH
:
26824 case BFD_RELOC_ARM_PCREL_BLX
:
26825 case BFD_RELOC_RVA
:
26826 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
26827 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
26828 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
26829 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
26830 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
26831 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
26832 case BFD_RELOC_VTABLE_ENTRY
:
26833 case BFD_RELOC_VTABLE_INHERIT
:
26835 case BFD_RELOC_32_SECREL
:
26837 code
= fixp
->fx_r_type
;
26840 case BFD_RELOC_THUMB_PCREL_BLX
:
26842 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
26843 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
26846 code
= BFD_RELOC_THUMB_PCREL_BLX
;
26849 case BFD_RELOC_ARM_LITERAL
:
26850 case BFD_RELOC_ARM_HWLITERAL
:
26851 /* If this is called then the a literal has
26852 been referenced across a section boundary. */
26853 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26854 _("literal referenced across section boundary"));
26858 case BFD_RELOC_ARM_TLS_CALL
:
26859 case BFD_RELOC_ARM_THM_TLS_CALL
:
26860 case BFD_RELOC_ARM_TLS_DESCSEQ
:
26861 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
26862 case BFD_RELOC_ARM_GOT32
:
26863 case BFD_RELOC_ARM_GOTOFF
:
26864 case BFD_RELOC_ARM_GOT_PREL
:
26865 case BFD_RELOC_ARM_PLT32
:
26866 case BFD_RELOC_ARM_TARGET1
:
26867 case BFD_RELOC_ARM_ROSEGREL32
:
26868 case BFD_RELOC_ARM_SBREL32
:
26869 case BFD_RELOC_ARM_PREL31
:
26870 case BFD_RELOC_ARM_TARGET2
:
26871 case BFD_RELOC_ARM_TLS_LDO32
:
26872 case BFD_RELOC_ARM_PCREL_CALL
:
26873 case BFD_RELOC_ARM_PCREL_JUMP
:
26874 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
26875 case BFD_RELOC_ARM_ALU_PC_G0
:
26876 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
26877 case BFD_RELOC_ARM_ALU_PC_G1
:
26878 case BFD_RELOC_ARM_ALU_PC_G2
:
26879 case BFD_RELOC_ARM_LDR_PC_G0
:
26880 case BFD_RELOC_ARM_LDR_PC_G1
:
26881 case BFD_RELOC_ARM_LDR_PC_G2
:
26882 case BFD_RELOC_ARM_LDRS_PC_G0
:
26883 case BFD_RELOC_ARM_LDRS_PC_G1
:
26884 case BFD_RELOC_ARM_LDRS_PC_G2
:
26885 case BFD_RELOC_ARM_LDC_PC_G0
:
26886 case BFD_RELOC_ARM_LDC_PC_G1
:
26887 case BFD_RELOC_ARM_LDC_PC_G2
:
26888 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
26889 case BFD_RELOC_ARM_ALU_SB_G0
:
26890 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
26891 case BFD_RELOC_ARM_ALU_SB_G1
:
26892 case BFD_RELOC_ARM_ALU_SB_G2
:
26893 case BFD_RELOC_ARM_LDR_SB_G0
:
26894 case BFD_RELOC_ARM_LDR_SB_G1
:
26895 case BFD_RELOC_ARM_LDR_SB_G2
:
26896 case BFD_RELOC_ARM_LDRS_SB_G0
:
26897 case BFD_RELOC_ARM_LDRS_SB_G1
:
26898 case BFD_RELOC_ARM_LDRS_SB_G2
:
26899 case BFD_RELOC_ARM_LDC_SB_G0
:
26900 case BFD_RELOC_ARM_LDC_SB_G1
:
26901 case BFD_RELOC_ARM_LDC_SB_G2
:
26902 case BFD_RELOC_ARM_V4BX
:
26903 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
26904 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
26905 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
26906 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
26907 case BFD_RELOC_ARM_GOTFUNCDESC
:
26908 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
26909 case BFD_RELOC_ARM_FUNCDESC
:
26910 case BFD_RELOC_ARM_THUMB_BF17
:
26911 case BFD_RELOC_ARM_THUMB_BF19
:
26912 case BFD_RELOC_ARM_THUMB_BF13
:
26913 code
= fixp
->fx_r_type
;
26916 case BFD_RELOC_ARM_TLS_GOTDESC
:
26917 case BFD_RELOC_ARM_TLS_GD32
:
26918 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
26919 case BFD_RELOC_ARM_TLS_LE32
:
26920 case BFD_RELOC_ARM_TLS_IE32
:
26921 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
26922 case BFD_RELOC_ARM_TLS_LDM32
:
26923 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
26924 /* BFD will include the symbol's address in the addend.
26925 But we don't want that, so subtract it out again here. */
26926 if (!S_IS_COMMON (fixp
->fx_addsy
))
26927 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
26928 code
= fixp
->fx_r_type
;
26932 case BFD_RELOC_ARM_IMMEDIATE
:
26933 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26934 _("internal relocation (type: IMMEDIATE) not fixed up"));
26937 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
26938 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26939 _("ADRL used for a symbol not defined in the same file"));
26942 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
26943 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
26944 case BFD_RELOC_ARM_THUMB_LOOP12
:
26945 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26946 _("%s used for a symbol not defined in the same file"),
26947 bfd_get_reloc_code_name (fixp
->fx_r_type
));
26950 case BFD_RELOC_ARM_OFFSET_IMM
:
26951 if (section
->use_rela_p
)
26953 code
= fixp
->fx_r_type
;
26957 if (fixp
->fx_addsy
!= NULL
26958 && !S_IS_DEFINED (fixp
->fx_addsy
)
26959 && S_IS_LOCAL (fixp
->fx_addsy
))
26961 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26962 _("undefined local label `%s'"),
26963 S_GET_NAME (fixp
->fx_addsy
));
26967 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26968 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
26975 switch (fixp
->fx_r_type
)
26977 case BFD_RELOC_NONE
: type
= "NONE"; break;
26978 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
26979 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
26980 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
26981 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
26982 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
26983 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
26984 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
26985 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
26986 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
26987 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
26988 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
26989 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
26990 default: type
= _("<unknown>"); break;
26992 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
26993 _("cannot represent %s relocation in this object file format"),
27000 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
27002 && fixp
->fx_addsy
== GOT_symbol
)
27004 code
= BFD_RELOC_ARM_GOTPC
;
27005 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
27009 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
27011 if (reloc
->howto
== NULL
)
27013 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
27014 _("cannot represent %s relocation in this object file format"),
27015 bfd_get_reloc_code_name (code
));
27019 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
27020 vtable entry to be used in the relocation's section offset. */
27021 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
27022 reloc
->address
= fixp
->fx_offset
;
27027 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
27030 cons_fix_new_arm (fragS
* frag
,
27034 bfd_reloc_code_real_type reloc
)
27039 FIXME: @@ Should look at CPU word size. */
27043 reloc
= BFD_RELOC_8
;
27046 reloc
= BFD_RELOC_16
;
27050 reloc
= BFD_RELOC_32
;
27053 reloc
= BFD_RELOC_64
;
27058 if (exp
->X_op
== O_secrel
)
27060 exp
->X_op
= O_symbol
;
27061 reloc
= BFD_RELOC_32_SECREL
;
27065 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
#if defined (OBJ_COFF)
/* Redirect Thumb BL fixups aimed at plain ARM functions to the Thumb
   entry point created for (interfacearm) functions.  */
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
27088 arm_force_relocation (struct fix
* fixp
)
27090 #if defined (OBJ_COFF) && defined (TE_PE)
27091 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
27095 /* In case we have a call or a branch to a function in ARM ISA mode from
27096 a thumb function or vice-versa force the relocation. These relocations
27097 are cleared off for some cores that might have blx and simple transformations
27101 switch (fixp
->fx_r_type
)
27103 case BFD_RELOC_ARM_PCREL_JUMP
:
27104 case BFD_RELOC_ARM_PCREL_CALL
:
27105 case BFD_RELOC_THUMB_PCREL_BLX
:
27106 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
27110 case BFD_RELOC_ARM_PCREL_BLX
:
27111 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
27112 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
27113 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
27114 if (ARM_IS_FUNC (fixp
->fx_addsy
))
27123 /* Resolve these relocations even if the symbol is extern or weak.
27124 Technically this is probably wrong due to symbol preemption.
27125 In practice these relocations do not have enough range to be useful
27126 at dynamic link time, and some code (e.g. in the Linux kernel)
27127 expects these references to be resolved. */
27128 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
27129 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
27130 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
27131 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
27132 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
27133 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
27134 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
27135 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
27136 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
27137 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
27138 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
27139 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
27140 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
27141 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
27144 /* Always leave these relocations for the linker. */
27145 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
27146 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
27147 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
27150 /* Always generate relocations against function symbols. */
27151 if (fixp
->fx_r_type
== BFD_RELOC_32
27153 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
27156 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return FALSE if the relocation FIXP must be kept against its original
   symbol (i.e. must not be redirected to a section symbol).
   NOTE(review): reconstructed from a garbled extraction — the return
   statements were elided; confirm against upstream tc-arm.c.  */
bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return FALSE;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return FALSE;

  /* We need the symbol name for the VTABLE entries.  */
  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return FALSE;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return FALSE;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return FALSE;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return FALSE;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return FALSE;

  return TRUE;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
27241 elf32_arm_target_format (void)
27244 return (target_big_endian
27245 ? "elf32-bigarm-symbian"
27246 : "elf32-littlearm-symbian");
27247 #elif defined (TE_VXWORKS)
27248 return (target_big_endian
27249 ? "elf32-bigarm-vxworks"
27250 : "elf32-littlearm-vxworks");
27251 #elif defined (TE_NACL)
27252 return (target_big_endian
27253 ? "elf32-bigarm-nacl"
27254 : "elf32-littlearm-nacl");
27258 if (target_big_endian
)
27259 return "elf32-bigarm-fdpic";
27261 return "elf32-littlearm-fdpic";
27265 if (target_big_endian
)
27266 return "elf32-bigarm";
27268 return "elf32-littlearm";
27274 armelf_frob_symbol (symbolS
* symp
,
27277 elf_frob_symbol (symp
, puntp
);
27281 /* MD interface: Finalization. */
27286 literal_pool
* pool
;
27288 /* Ensure that all the predication blocks are properly closed. */
27289 check_pred_blocks_finished ();
27291 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
27293 /* Put it at the end of the relevant section. */
27294 subseg_set (pool
->section
, pool
->sub_section
);
27296 arm_elf_change_section ();
27303 /* Remove any excess mapping symbols generated for alignment frags in
27304 SEC. We may have created a mapping symbol before a zero byte
27305 alignment; remove it if there's a mapping symbol after the
27308 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
27309 void *dummy ATTRIBUTE_UNUSED
)
27311 segment_info_type
*seginfo
= seg_info (sec
);
27314 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
27317 for (fragp
= seginfo
->frchainP
->frch_root
;
27319 fragp
= fragp
->fr_next
)
27321 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
27322 fragS
*next
= fragp
->fr_next
;
27324 /* Variable-sized frags have been converted to fixed size by
27325 this point. But if this was variable-sized to start with,
27326 there will be a fixed-size frag after it. So don't handle
27328 if (sym
== NULL
|| next
== NULL
)
27331 if (S_GET_VALUE (sym
) < next
->fr_address
)
27332 /* Not at the end of this frag. */
27334 know (S_GET_VALUE (sym
) == next
->fr_address
);
27338 if (next
->tc_frag_data
.first_map
!= NULL
)
27340 /* Next frag starts with a mapping symbol. Discard this
27342 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
27346 if (next
->fr_next
== NULL
)
27348 /* This mapping symbol is at the end of the section. Discard
27350 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
27351 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
27355 /* As long as we have empty frags without any mapping symbols,
27357 /* If the next frag is non-empty and does not start with a
27358 mapping symbol, then this mapping symbol is required. */
27359 if (next
->fr_address
!= next
->fr_next
->fr_address
)
27362 next
= next
->fr_next
;
27364 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   normal ones.
   NOTE(review): reconstructed from a garbled extraction — the #ifdef
   OBJ_COFF / OBJ_ELF guards and local declarations were elided; confirm
   against upstream tc-arm.c.  */
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (	 S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!  */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
						BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
27451 /* MD interface: Initialization. */
27454 set_constant_flonums (void)
27458 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
27459 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
27463 /* Auto-select Thumb mode if it's the only available instruction set for the
27464 given architecture. */
27467 autoselect_thumb_from_cpu_variant (void)
27469 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
27470 opcode_select (16);
27479 if ( (arm_ops_hsh
= hash_new ()) == NULL
27480 || (arm_cond_hsh
= hash_new ()) == NULL
27481 || (arm_vcond_hsh
= hash_new ()) == NULL
27482 || (arm_shift_hsh
= hash_new ()) == NULL
27483 || (arm_psr_hsh
= hash_new ()) == NULL
27484 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
27485 || (arm_reg_hsh
= hash_new ()) == NULL
27486 || (arm_reloc_hsh
= hash_new ()) == NULL
27487 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
27488 as_fatal (_("virtual memory exhausted"));
27490 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
27491 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
27492 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
27493 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
27494 for (i
= 0; i
< sizeof (vconds
) / sizeof (struct asm_cond
); i
++)
27495 hash_insert (arm_vcond_hsh
, vconds
[i
].template_name
, (void *) (vconds
+ i
));
27496 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
27497 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
27498 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
27499 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
27500 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
27501 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
27502 (void *) (v7m_psrs
+ i
));
27503 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
27504 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
27506 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
27508 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
27509 (void *) (barrier_opt_names
+ i
));
27511 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
27513 struct reloc_entry
* entry
= reloc_names
+ i
;
27515 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
27516 /* This makes encode_branch() use the EABI versions of this relocation. */
27517 entry
->reloc
= BFD_RELOC_UNUSED
;
27519 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
27523 set_constant_flonums ();
27525 /* Set the cpu variant based on the command-line options. We prefer
27526 -mcpu= over -march= if both are set (as for GCC); and we prefer
27527 -mfpu= over any other way of setting the floating point unit.
27528 Use of legacy options with new options are faulted. */
27531 if (mcpu_cpu_opt
|| march_cpu_opt
)
27532 as_bad (_("use of old and new-style options to set CPU type"));
27534 selected_arch
= *legacy_cpu
;
27536 else if (mcpu_cpu_opt
)
27538 selected_arch
= *mcpu_cpu_opt
;
27539 selected_ext
= *mcpu_ext_opt
;
27541 else if (march_cpu_opt
)
27543 selected_arch
= *march_cpu_opt
;
27544 selected_ext
= *march_ext_opt
;
27546 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
27551 as_bad (_("use of old and new-style options to set FPU type"));
27553 selected_fpu
= *legacy_fpu
;
27556 selected_fpu
= *mfpu_opt
;
27559 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
27560 || defined (TE_NetBSD) || defined (TE_VXWORKS))
27561 /* Some environments specify a default FPU. If they don't, infer it
27562 from the processor. */
27564 selected_fpu
= *mcpu_fpu_opt
;
27565 else if (march_fpu_opt
)
27566 selected_fpu
= *march_fpu_opt
;
27568 selected_fpu
= fpu_default
;
27572 if (ARM_FEATURE_ZERO (selected_fpu
))
27574 if (!no_cpu_selected ())
27575 selected_fpu
= fpu_default
;
27577 selected_fpu
= fpu_arch_fpa
;
27581 if (ARM_FEATURE_ZERO (selected_arch
))
27583 selected_arch
= cpu_default
;
27584 selected_cpu
= selected_arch
;
27586 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27588 /* Autodection of feature mode: allow all features in cpu_variant but leave
27589 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
27590 after all instruction have been processed and we can decide what CPU
27591 should be selected. */
27592 if (ARM_FEATURE_ZERO (selected_arch
))
27593 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
27595 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
27598 autoselect_thumb_from_cpu_variant ();
27600 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
27602 #if defined OBJ_COFF || defined OBJ_ELF
27604 unsigned int flags
= 0;
27606 #if defined OBJ_ELF
27607 flags
= meabi_flags
;
27609 switch (meabi_flags
)
27611 case EF_ARM_EABI_UNKNOWN
:
27613 /* Set the flags in the private structure. */
27614 if (uses_apcs_26
) flags
|= F_APCS26
;
27615 if (support_interwork
) flags
|= F_INTERWORK
;
27616 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
27617 if (pic_code
) flags
|= F_PIC
;
27618 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
27619 flags
|= F_SOFT_FLOAT
;
27621 switch (mfloat_abi_opt
)
27623 case ARM_FLOAT_ABI_SOFT
:
27624 case ARM_FLOAT_ABI_SOFTFP
:
27625 flags
|= F_SOFT_FLOAT
;
27628 case ARM_FLOAT_ABI_HARD
:
27629 if (flags
& F_SOFT_FLOAT
)
27630 as_bad (_("hard-float conflicts with specified fpu"));
27634 /* Using pure-endian doubles (even if soft-float). */
27635 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
27636 flags
|= F_VFP_FLOAT
;
27638 #if defined OBJ_ELF
27639 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
27640 flags
|= EF_ARM_MAVERICK_FLOAT
;
27643 case EF_ARM_EABI_VER4
:
27644 case EF_ARM_EABI_VER5
:
27645 /* No additional flags to set. */
27652 bfd_set_private_flags (stdoutput
, flags
);
27654 /* We have run out flags in the COFF header to encode the
27655 status of ATPCS support, so instead we create a dummy,
27656 empty, debug section called .arm.atpcs. */
27661 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
27665 bfd_set_section_flags
27666 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
27667 bfd_set_section_size (stdoutput
, sec
, 0);
27668 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
27674 /* Record the CPU type as well. */
27675 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
27676 mach
= bfd_mach_arm_iWMMXt2
;
27677 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
27678 mach
= bfd_mach_arm_iWMMXt
;
27679 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
27680 mach
= bfd_mach_arm_XScale
;
27681 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
27682 mach
= bfd_mach_arm_ep9312
;
27683 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
27684 mach
= bfd_mach_arm_5TE
;
27685 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
27687 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27688 mach
= bfd_mach_arm_5T
;
27690 mach
= bfd_mach_arm_5
;
27692 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
27694 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
27695 mach
= bfd_mach_arm_4T
;
27697 mach
= bfd_mach_arm_4
;
27699 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
27700 mach
= bfd_mach_arm_3M
;
27701 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
27702 mach
= bfd_mach_arm_3
;
27703 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
27704 mach
= bfd_mach_arm_2a
;
27705 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
27706 mach
= bfd_mach_arm_2
;
27708 mach
= bfd_mach_arm_unknown
;
27710 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
27713 /* Command line processing. */
27716 Invocation line includes a switch not recognized by the base assembler.
27717 See if it's a processor-specific option.
27719 This routine is somewhat complicated by the need for backwards
27720 compatibility (since older releases of gcc can't be changed).
27721 The new options try to make the interface as compatible as
27724 New options (supported) are:
27726 -mcpu=<cpu name> Assemble for selected processor
27727 -march=<architecture name> Assemble for selected architecture
27728 -mfpu=<fpu architecture> Assemble for selected FPU.
27729 -EB/-mbig-endian Big-endian
27730 -EL/-mlittle-endian Little-endian
27731 -k Generate PIC code
27732 -mthumb Start in Thumb mode
27733 -mthumb-interwork Code supports ARM/Thumb interworking
27735 -m[no-]warn-deprecated Warn about deprecated features
27736 -m[no-]warn-syms Warn when symbols match instructions
27738 For now we will also provide support for:
27740 -mapcs-32 32-bit Program counter
27741 -mapcs-26 26-bit Program counter
27742 -macps-float Floats passed in FP registers
27743 -mapcs-reentrant Reentrant code
27745 (sometime these will probably be replaced with -mapcs=<list of options>
27746 and -matpcs=<list of options>)
27748 The remaining options are only supported for back-wards compatibility.
27749 Cpu variants, the arm part is optional:
27750 -m[arm]1 Currently not supported.
27751 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
27752 -m[arm]3 Arm 3 processor
27753 -m[arm]6[xx], Arm 6 processors
27754 -m[arm]7[xx][t][[d]m] Arm 7 processors
27755 -m[arm]8[10] Arm 8 processors
27756 -m[arm]9[20][tdmi] Arm 9 processors
27757 -mstrongarm[110[0]] StrongARM processors
27758 -mxscale XScale processors
27759 -m[arm]v[2345[t[e]]] Arm architectures
27760 -mall All (except the ARM1)
27762 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
27763 -mfpe-old (No float load/store multiples)
27764 -mvfpxd VFP Single precision
27766 -mno-fpu Disable all floating point instructions
27768 The following CPU names are recognized:
27769 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
27770 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
27771 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
27772 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
27773 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
27774 arm10t arm10e, arm1020t, arm1020e, arm10200e,
27775 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short options for md_parse_option: -m takes an argument (getopt "m:"),
   -k takes none.  */
const char * md_shortopts = "m:k";
27781 #ifdef ARM_BI_ENDIAN
27782 #define OPTION_EB (OPTION_MD_BASE + 0)
27783 #define OPTION_EL (OPTION_MD_BASE + 1)
27785 #if TARGET_BYTES_BIG_ENDIAN
27786 #define OPTION_EB (OPTION_MD_BASE + 0)
27788 #define OPTION_EL (OPTION_MD_BASE + 1)
27791 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
27792 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
27794 struct option md_longopts
[] =
27797 {"EB", no_argument
, NULL
, OPTION_EB
},
27800 {"EL", no_argument
, NULL
, OPTION_EL
},
27802 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
27804 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
27806 {NULL
, no_argument
, NULL
, 0}
27809 size_t md_longopts_size
= sizeof (md_longopts
);
/* Describes a simple command-line option that sets an int variable.  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.  */
  int           value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
27820 struct arm_option_table arm_opts
[] =
27822 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
27823 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
27824 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
27825 &support_interwork
, 1, NULL
},
27826 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
27827 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
27828 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
27830 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
27831 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
27832 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
27833 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
27836 /* These are recognized by the assembler, but have no affect on code. */
27837 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
27838 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
27840 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
27841 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
27842 &warn_on_deprecated
, 0, NULL
},
27843 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
27844 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
27845 {NULL
, NULL
, NULL
, 0, NULL
}
27848 struct arm_legacy_option_table
27850 const char * option
; /* Option name to match. */
27851 const arm_feature_set
** var
; /* Variable to change. */
27852 const arm_feature_set value
; /* What to change it to. */
27853 const char * deprecated
; /* If non-null, print this message. */
27856 const struct arm_legacy_option_table arm_legacy_opts
[] =
27858 /* DON'T add any new processors to this list -- we want the whole list
27859 to go away... Add them to the processors table instead. */
27860 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27861 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
27862 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27863 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
27864 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27865 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
27866 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27867 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
27868 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27869 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
27870 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27871 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
27872 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27873 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
27874 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27875 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
27876 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27877 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
27878 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27879 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
27880 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27881 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
27882 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27883 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
27884 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27885 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
27886 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27887 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
27888 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27889 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
27890 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27891 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
27892 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27893 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
27894 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27895 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
27896 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27897 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
27898 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27899 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
27900 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27901 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
27902 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27903 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
27904 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27905 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
27906 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27907 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27908 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27909 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
27910 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27911 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
27912 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27913 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
27914 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27915 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
27916 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27917 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
27918 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27919 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
27920 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27921 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
27922 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27923 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
27924 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27925 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
27926 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27927 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
27928 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
27929 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
27930 N_("use -mcpu=strongarm110")},
27931 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
27932 N_("use -mcpu=strongarm1100")},
27933 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
27934 N_("use -mcpu=strongarm1110")},
27935 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
27936 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
27937 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
27939 /* Architecture variants -- don't add any more to this list either. */
27940 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27941 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
27942 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27943 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
27944 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27945 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
27946 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27947 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
27948 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27949 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
27950 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27951 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
27952 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27953 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
27954 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27955 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
27956 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27957 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
27959 /* Floating point variants -- don't add any more to this list either. */
27960 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
27961 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
27962 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
27963 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
27964 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
27966 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
27969 struct arm_cpu_option_table
27973 const arm_feature_set value
;
27974 const arm_feature_set ext
;
27975 /* For some CPUs we assume an FPU unless the user explicitly sets
27977 const arm_feature_set default_fpu
;
27978 /* The canonical name of the CPU, or NULL to use NAME converted to upper
27980 const char * canonical_name
;
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Builds an arm_cpu_option_table initializer; the name length is
   computed at compile time from the string literal N.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
27987 static const struct arm_cpu_option_table arm_cpus
[] =
27989 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
27992 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
27995 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
27998 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
28001 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
28004 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
28007 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
28010 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
28013 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
28016 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
28019 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
28022 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
28025 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
28028 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
28031 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
28034 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
28037 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
28040 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
28043 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
28046 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
28049 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
28052 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
28055 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
28058 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
28061 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
28064 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
28067 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
28070 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
28073 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
28076 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
28079 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
28082 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
28085 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
28088 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
28091 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
28094 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
28097 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
28100 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
28103 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
28106 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
28109 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
28112 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
28115 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
28118 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
28121 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
28124 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
28128 /* For V5 or later processors we default to using VFP; but the user
28129 should really set the FPU type explicitly. */
28130 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
28133 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
28136 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
28139 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
28142 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
28145 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
28148 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
28151 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
28154 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
28157 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
28160 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
28163 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
28166 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
28169 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
28172 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
28175 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
28178 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
28181 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
28184 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
28187 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
28190 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
28193 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
28196 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
28199 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
28202 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
28205 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
28208 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
28211 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
28214 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
28217 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
28220 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
28223 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
28226 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
28229 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
28232 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
28235 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
28238 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
28239 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28241 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
28243 FPU_ARCH_NEON_VFP_V4
),
28244 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
28245 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28246 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
28247 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
28248 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28249 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
28250 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
28252 FPU_ARCH_NEON_VFP_V4
),
28253 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
28255 FPU_ARCH_NEON_VFP_V4
),
28256 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
28258 FPU_ARCH_NEON_VFP_V4
),
28259 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
28260 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28261 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28262 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
28263 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28264 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28265 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
28266 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28267 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28268 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
28269 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28270 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28271 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
28272 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28273 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28274 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
28275 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28276 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28277 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
28278 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28279 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28280 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
28281 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28282 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28283 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
28284 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28285 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28286 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
28287 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28288 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28289 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
28292 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
28294 FPU_ARCH_VFP_V3D16
),
28295 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
28296 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
28298 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
28299 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
28300 FPU_ARCH_VFP_V3D16
),
28301 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
28302 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
28303 FPU_ARCH_VFP_V3D16
),
28304 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
28305 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28306 FPU_ARCH_NEON_VFP_ARMV8
),
28307 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
28308 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28310 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
28313 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
28316 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
28319 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
28322 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
28325 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
28328 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
28331 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
28332 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28333 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28334 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
28335 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28336 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
28337 /* ??? XSCALE is really an architecture. */
28338 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
28342 /* ??? iwmmxt is not a processor. */
28343 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
28346 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
28349 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
28354 ARM_CPU_OPT ("ep9312", "ARM920T",
28355 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
28356 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
28358 /* Marvell processors. */
28359 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
28360 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28361 FPU_ARCH_VFP_V3D16
),
28362 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
28363 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
28364 FPU_ARCH_NEON_VFP_V4
),
28366 /* APM X-Gene family. */
28367 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
28369 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28370 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
28371 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28372 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
28374 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
28378 struct arm_ext_table
28382 const arm_feature_set merge
;
28383 const arm_feature_set clear
;
28386 struct arm_arch_option_table
28390 const arm_feature_set value
;
28391 const arm_feature_set default_fpu
;
28392 const struct arm_ext_table
* ext_table
;
28395 /* Used to add support for +E and +noE extension. */
28396 #define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
28397 /* Used to add support for a +E extension. */
28398 #define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
28399 /* Used to add support for a +noE extension. */
28400 #define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
28402 #define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
28403 ~0 & ~FPU_ENDIAN_PURE)
28405 static const struct arm_ext_table armv5te_ext_table
[] =
28407 ARM_EXT ("fp", FPU_ARCH_VFP_V2
, ALL_FP
),
28408 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28411 static const struct arm_ext_table armv7_ext_table
[] =
28413 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28414 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28417 static const struct arm_ext_table armv7ve_ext_table
[] =
28419 ARM_EXT ("fp", FPU_ARCH_VFP_V4D16
, ALL_FP
),
28420 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
),
28421 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
28422 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28423 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
28424 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
), /* Alias for +fp. */
28425 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
28427 ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4
,
28428 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
28430 /* Aliases for +simd. */
28431 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
28433 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28434 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28435 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
28437 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28440 static const struct arm_ext_table armv7a_ext_table
[] =
28442 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28443 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
28444 ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3
),
28445 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28446 ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
),
28447 ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16
),
28448 ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4
),
28450 ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1
,
28451 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1
| FPU_NEON_EXT_FMA
)),
28453 /* Aliases for +simd. */
28454 ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28455 ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
),
28457 ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16
),
28458 ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4
),
28460 ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
)),
28461 ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
)),
28462 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28465 static const struct arm_ext_table armv7r_ext_table
[] =
28467 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD
),
28468 ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD
), /* Alias for +fp.sp. */
28469 ARM_EXT ("fp", FPU_ARCH_VFP_V3D16
, ALL_FP
),
28470 ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16
), /* Alias for +fp. */
28471 ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
),
28472 ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
),
28473 ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28474 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
)),
28475 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28478 static const struct arm_ext_table armv7em_ext_table
[] =
28480 ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16
, ALL_FP
),
28481 /* Alias for +fp, used to be known as fpv4-sp-d16. */
28482 ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
),
28483 ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16
),
28484 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28485 ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16
),
28486 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28489 static const struct arm_ext_table armv8a_ext_table
[] =
28491 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28492 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28493 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28494 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28496 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28497 should use the +simd option to turn on FP. */
28498 ARM_REMOVE ("fp", ALL_FP
),
28499 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28500 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28501 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28505 static const struct arm_ext_table armv81a_ext_table
[] =
28507 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
28508 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
28509 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28511 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28512 should use the +simd option to turn on FP. */
28513 ARM_REMOVE ("fp", ALL_FP
),
28514 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28515 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28516 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28519 static const struct arm_ext_table armv82a_ext_table
[] =
28521 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1
),
28522 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16
),
28523 ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML
),
28524 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
,
28525 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28526 ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28528 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28529 should use the +simd option to turn on FP. */
28530 ARM_REMOVE ("fp", ALL_FP
),
28531 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28532 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28533 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28536 static const struct arm_ext_table armv84a_ext_table
[] =
28538 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28539 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28540 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28541 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28543 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28544 should use the +simd option to turn on FP. */
28545 ARM_REMOVE ("fp", ALL_FP
),
28546 ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
)),
28547 ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
)),
28548 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28551 static const struct arm_ext_table armv85a_ext_table
[] =
28553 ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
),
28554 ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML
),
28555 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4
,
28556 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28558 /* Armv8-a does not allow an FP implementation without SIMD, so the user
28559 should use the +simd option to turn on FP. */
28560 ARM_REMOVE ("fp", ALL_FP
),
28561 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28564 static const struct arm_ext_table armv8m_main_ext_table
[] =
28566 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28567 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28568 ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16
, ALL_FP
),
28569 ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16
),
28570 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28573 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
28575 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28576 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
28578 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28579 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
28582 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28583 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28584 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE
),
28585 ARM_FEATURE_COPROC (FPU_MVE
| FPU_MVE_FP
)),
28587 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
28588 FPU_MVE
| FPU_MVE_FP
| FPU_VFP_V5_SP_D16
|
28589 FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
28590 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28593 static const struct arm_ext_table armv8r_ext_table
[] =
28595 ARM_ADD ("crc", ARCH_CRC_ARMV8
),
28596 ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8
),
28597 ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28598 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
)),
28599 ARM_REMOVE ("fp", ALL_FP
),
28600 ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16
),
28601 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
28604 /* This list should, at a minimum, contain all the architecture names
28605 recognized by GCC. */
28606 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
28607 #define ARM_ARCH_OPT2(N, V, DF, ext) \
28608 { N, sizeof (N) - 1, V, DF, ext##_ext_table }
28610 static const struct arm_arch_option_table arm_archs
[] =
28612 ARM_ARCH_OPT ("all", ARM_ANY
, FPU_ARCH_FPA
),
28613 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1
, FPU_ARCH_FPA
),
28614 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2
, FPU_ARCH_FPA
),
28615 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28616 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S
, FPU_ARCH_FPA
),
28617 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3
, FPU_ARCH_FPA
),
28618 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M
, FPU_ARCH_FPA
),
28619 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4
, FPU_ARCH_FPA
),
28620 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM
, FPU_ARCH_FPA
),
28621 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T
, FPU_ARCH_FPA
),
28622 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM
, FPU_ARCH_FPA
),
28623 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5
, FPU_ARCH_VFP
),
28624 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T
, FPU_ARCH_VFP
),
28625 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM
, FPU_ARCH_VFP
),
28626 ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE
, FPU_ARCH_VFP
, armv5te
),
28627 ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP
, FPU_ARCH_VFP
, armv5te
),
28628 ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ
, FPU_ARCH_VFP
, armv5te
),
28629 ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28630 ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6
, FPU_ARCH_VFP
, armv5te
),
28631 ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K
, FPU_ARCH_VFP
, armv5te
),
28632 ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z
, FPU_ARCH_VFP
, armv5te
),
28633 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
28634 kept to preserve existing behaviour. */
28635 ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28636 ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ
, FPU_ARCH_VFP
, armv5te
),
28637 ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2
, FPU_ARCH_VFP
, armv5te
),
28638 ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2
, FPU_ARCH_VFP
, armv5te
),
28639 ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2
, FPU_ARCH_VFP
, armv5te
),
28640 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
28641 kept to preserve existing behaviour. */
28642 ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28643 ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2
, FPU_ARCH_VFP
, armv5te
),
28644 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M
, FPU_ARCH_VFP
),
28645 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM
, FPU_ARCH_VFP
),
28646 ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7
, FPU_ARCH_VFP
, armv7
),
28647 /* The official spelling of the ARMv7 profile variants is the dashed form.
28648 Accept the non-dashed form for compatibility with old toolchains. */
28649 ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28650 ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE
, FPU_ARCH_VFP
, armv7ve
),
28651 ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28652 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28653 ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A
, FPU_ARCH_VFP
, armv7a
),
28654 ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R
, FPU_ARCH_VFP
, armv7r
),
28655 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M
, FPU_ARCH_VFP
),
28656 ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM
, FPU_ARCH_VFP
, armv7em
),
28657 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE
, FPU_ARCH_VFP
),
28658 ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN
, FPU_ARCH_VFP
,
28660 ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN
, FPU_ARCH_VFP
,
28662 ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A
, FPU_ARCH_VFP
, armv8a
),
28663 ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A
, FPU_ARCH_VFP
, armv81a
),
28664 ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A
, FPU_ARCH_VFP
, armv82a
),
28665 ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A
, FPU_ARCH_VFP
, armv82a
),
28666 ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R
, FPU_ARCH_VFP
, armv8r
),
28667 ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A
, FPU_ARCH_VFP
, armv84a
),
28668 ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A
, FPU_ARCH_VFP
, armv85a
),
28669 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE
, FPU_ARCH_VFP
),
28670 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT
, FPU_ARCH_VFP
),
28671 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2
, FPU_ARCH_VFP
),
28672 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
28674 #undef ARM_ARCH_OPT
28676 /* ISA extensions in the co-processor and main instruction set space. */
28678 struct arm_option_extension_value_table
28682 const arm_feature_set merge_value
;
28683 const arm_feature_set clear_value
;
28684 /* List of architectures for which an extension is available. ARM_ARCH_NONE
28685 indicates that an extension is available for all architectures while
28686 ARM_ANY marks an empty entry. */
28687 const arm_feature_set allowed_archs
[2];
28690 /* The following table must be in alphabetical order with a NULL last entry. */
28692 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
28693 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
28695 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
28696 use the context sensitive approach using arm_ext_table's. */
28697 static const struct arm_option_extension_value_table arm_extensions
[] =
28699 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
28700 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28701 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
28702 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
28703 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28704 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
28705 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
28707 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28708 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
28709 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
28710 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
28711 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28712 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28713 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
28715 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28716 | ARM_EXT2_FP16_FML
),
28717 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
28718 | ARM_EXT2_FP16_FML
),
28720 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28721 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
28722 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28723 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28724 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
28725 Thumb divide instruction. Due to this having the same name as the
28726 previous entry, this will be ignored when doing command-line parsing and
28727 only considered by build attribute selection code. */
28728 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28729 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
28730 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
28731 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
28732 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
28733 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
28734 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
28735 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
28736 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
28737 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28738 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
28739 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
28740 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
28741 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28742 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
28743 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
28744 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
28745 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
28746 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28747 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28748 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
28750 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
28751 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
28752 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28753 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
28754 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
28755 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
28756 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28757 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
28759 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28760 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
28761 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
28762 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28763 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
28764 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
28765 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
28766 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
28768 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
28769 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
28770 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
28771 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
28772 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
28776 /* ISA floating-point and Advanced SIMD extensions. */
28777 struct arm_option_fpu_value_table
28780 const arm_feature_set value
;
28783 /* This list should, at a minimum, contain all the fpu names
28784 recognized by GCC. */
28785 static const struct arm_option_fpu_value_table arm_fpus
[] =
28787 {"softfpa", FPU_NONE
},
28788 {"fpe", FPU_ARCH_FPE
},
28789 {"fpe2", FPU_ARCH_FPE
},
28790 {"fpe3", FPU_ARCH_FPA
}, /* Third release supports LFM/SFM. */
28791 {"fpa", FPU_ARCH_FPA
},
28792 {"fpa10", FPU_ARCH_FPA
},
28793 {"fpa11", FPU_ARCH_FPA
},
28794 {"arm7500fe", FPU_ARCH_FPA
},
28795 {"softvfp", FPU_ARCH_VFP
},
28796 {"softvfp+vfp", FPU_ARCH_VFP_V2
},
28797 {"vfp", FPU_ARCH_VFP_V2
},
28798 {"vfp9", FPU_ARCH_VFP_V2
},
28799 {"vfp3", FPU_ARCH_VFP_V3
}, /* Undocumented, use vfpv3. */
28800 {"vfp10", FPU_ARCH_VFP_V2
},
28801 {"vfp10-r0", FPU_ARCH_VFP_V1
},
28802 {"vfpxd", FPU_ARCH_VFP_V1xD
},
28803 {"vfpv2", FPU_ARCH_VFP_V2
},
28804 {"vfpv3", FPU_ARCH_VFP_V3
},
28805 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16
},
28806 {"vfpv3-d16", FPU_ARCH_VFP_V3D16
},
28807 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16
},
28808 {"vfpv3xd", FPU_ARCH_VFP_V3xD
},
28809 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16
},
28810 {"arm1020t", FPU_ARCH_VFP_V1
},
28811 {"arm1020e", FPU_ARCH_VFP_V2
},
28812 {"arm1136jfs", FPU_ARCH_VFP_V2
}, /* Undocumented, use arm1136jf-s. */
28813 {"arm1136jf-s", FPU_ARCH_VFP_V2
},
28814 {"maverick", FPU_ARCH_MAVERICK
},
28815 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28816 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1
},
28817 {"neon-fp16", FPU_ARCH_NEON_FP16
},
28818 {"vfpv4", FPU_ARCH_VFP_V4
},
28819 {"vfpv4-d16", FPU_ARCH_VFP_V4D16
},
28820 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16
},
28821 {"fpv5-d16", FPU_ARCH_VFP_V5D16
},
28822 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16
},
28823 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4
},
28824 {"fp-armv8", FPU_ARCH_VFP_ARMV8
},
28825 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8
},
28826 {"crypto-neon-fp-armv8",
28827 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
},
28828 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1
},
28829 {"crypto-neon-fp-armv8.1",
28830 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1
},
28831 {NULL
, ARM_ARCH_NONE
}
28834 struct arm_option_value_table
28840 static const struct arm_option_value_table arm_float_abis
[] =
28842 {"hard", ARM_FLOAT_ABI_HARD
},
28843 {"softfp", ARM_FLOAT_ABI_SOFTFP
},
28844 {"soft", ARM_FLOAT_ABI_SOFT
},
28849 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
28850 static const struct arm_option_value_table arm_eabis
[] =
28852 {"gnu", EF_ARM_EABI_UNKNOWN
},
28853 {"4", EF_ARM_EABI_VER4
},
28854 {"5", EF_ARM_EABI_VER5
},
28859 struct arm_long_option_table
28861 const char * option
; /* Substring to match. */
28862 const char * help
; /* Help information. */
28863 int (* func
) (const char * subopt
); /* Function to decode sub-option. */
28864 const char * deprecated
; /* If non-null, print this message. */
28868 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
28869 arm_feature_set
*ext_set
,
28870 const struct arm_ext_table
*ext_table
)
28872 /* We insist on extensions being specified in alphabetical order, and with
28873 extensions being added before being removed. We achieve this by having
28874 the global ARM_EXTENSIONS table in alphabetical order, and using the
28875 ADDING_VALUE variable to indicate whether we are adding an extension (1)
28876 or removing it (0) and only allowing it to change in the order
28878 const struct arm_option_extension_value_table
* opt
= NULL
;
28879 const arm_feature_set arm_any
= ARM_ANY
;
28880 int adding_value
= -1;
28882 while (str
!= NULL
&& *str
!= 0)
28889 as_bad (_("invalid architectural extension"));
28894 ext
= strchr (str
, '+');
28899 len
= strlen (str
);
28901 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
28903 if (adding_value
!= 0)
28906 opt
= arm_extensions
;
28914 if (adding_value
== -1)
28917 opt
= arm_extensions
;
28919 else if (adding_value
!= 1)
28921 as_bad (_("must specify extensions to add before specifying "
28922 "those to remove"));
28929 as_bad (_("missing architectural extension"));
28933 gas_assert (adding_value
!= -1);
28934 gas_assert (opt
!= NULL
);
28936 if (ext_table
!= NULL
)
28938 const struct arm_ext_table
* ext_opt
= ext_table
;
28939 bfd_boolean found
= FALSE
;
28940 for (; ext_opt
->name
!= NULL
; ext_opt
++)
28941 if (ext_opt
->name_len
== len
28942 && strncmp (ext_opt
->name
, str
, len
) == 0)
28946 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
28947 /* TODO: Option not supported. When we remove the
28948 legacy table this case should error out. */
28951 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
28955 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
28956 /* TODO: Option not supported. When we remove the
28957 legacy table this case should error out. */
28959 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
28971 /* Scan over the options table trying to find an exact match. */
28972 for (; opt
->name
!= NULL
; opt
++)
28973 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
28975 int i
, nb_allowed_archs
=
28976 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28977 /* Check we can apply the extension to this architecture. */
28978 for (i
= 0; i
< nb_allowed_archs
; i
++)
28981 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
28983 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
28986 if (i
== nb_allowed_archs
)
28988 as_bad (_("extension does not apply to the base architecture"));
28992 /* Add or remove the extension. */
28994 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
28996 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
28998 /* Allowing Thumb division instructions for ARMv7 in autodetection
28999 rely on this break so that duplicate extensions (extensions
29000 with the same name as a previous extension in the list) are not
29001 considered for command-line parsing. */
29005 if (opt
->name
== NULL
)
29007 /* Did we fail to find an extension because it wasn't specified in
29008 alphabetical order, or because it does not exist? */
29010 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29011 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
29014 if (opt
->name
== NULL
)
29015 as_bad (_("unknown architectural extension `%s'"), str
);
29017 as_bad (_("architectural extensions must be specified in "
29018 "alphabetical order"));
29024 /* We should skip the extension we've just matched the next time
29036 arm_parse_cpu (const char *str
)
29038 const struct arm_cpu_option_table
*opt
;
29039 const char *ext
= strchr (str
, '+');
29045 len
= strlen (str
);
29049 as_bad (_("missing cpu name `%s'"), str
);
29053 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
29054 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
29056 mcpu_cpu_opt
= &opt
->value
;
29057 if (mcpu_ext_opt
== NULL
)
29058 mcpu_ext_opt
= XNEW (arm_feature_set
);
29059 *mcpu_ext_opt
= opt
->ext
;
29060 mcpu_fpu_opt
= &opt
->default_fpu
;
29061 if (opt
->canonical_name
)
29063 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
29064 strcpy (selected_cpu_name
, opt
->canonical_name
);
29070 if (len
>= sizeof selected_cpu_name
)
29071 len
= (sizeof selected_cpu_name
) - 1;
29073 for (i
= 0; i
< len
; i
++)
29074 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
29075 selected_cpu_name
[i
] = 0;
29079 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
29084 as_bad (_("unknown cpu `%s'"), str
);
29089 arm_parse_arch (const char *str
)
29091 const struct arm_arch_option_table
*opt
;
29092 const char *ext
= strchr (str
, '+');
29098 len
= strlen (str
);
29102 as_bad (_("missing architecture name `%s'"), str
);
29106 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
29107 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
29109 march_cpu_opt
= &opt
->value
;
29110 if (march_ext_opt
== NULL
)
29111 march_ext_opt
= XNEW (arm_feature_set
);
29112 *march_ext_opt
= arm_arch_none
;
29113 march_fpu_opt
= &opt
->default_fpu
;
29114 strcpy (selected_cpu_name
, opt
->name
);
29117 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
29123 as_bad (_("unknown architecture `%s'\n"), str
);
29128 arm_parse_fpu (const char * str
)
29130 const struct arm_option_fpu_value_table
* opt
;
29132 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
29133 if (streq (opt
->name
, str
))
29135 mfpu_opt
= &opt
->value
;
29139 as_bad (_("unknown floating point format `%s'\n"), str
);
29144 arm_parse_float_abi (const char * str
)
29146 const struct arm_option_value_table
* opt
;
29148 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
29149 if (streq (opt
->name
, str
))
29151 mfloat_abi_opt
= opt
->value
;
29155 as_bad (_("unknown floating point abi `%s'\n"), str
);
29161 arm_parse_eabi (const char * str
)
29163 const struct arm_option_value_table
*opt
;
29165 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
29166 if (streq (opt
->name
, str
))
29168 meabi_flags
= opt
->value
;
29171 as_bad (_("unknown EABI `%s'\n"), str
);
29177 arm_parse_it_mode (const char * str
)
29179 bfd_boolean ret
= TRUE
;
29181 if (streq ("arm", str
))
29182 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
29183 else if (streq ("thumb", str
))
29184 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
29185 else if (streq ("always", str
))
29186 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
29187 else if (streq ("never", str
))
29188 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
29191 as_bad (_("unknown implicit IT mode `%s', should be "\
29192 "arm, thumb, always, or never."), str
);
29200 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
29202 codecomposer_syntax
= TRUE
;
29203 arm_comment_chars
[0] = ';';
29204 arm_line_separator_chars
[0] = 0;
/* Long-form command-line options: each entry maps an option prefix to a
   help string, the parser callback invoked with the text following the
   prefix, and a deprecation message (NULL when the option is current).  */
29208 struct arm_long_option_table arm_long_opts
[] =
29210 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
29211 arm_parse_cpu
, NULL
},
29212 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
29213 arm_parse_arch
, NULL
},
29214 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
29215 arm_parse_fpu
, NULL
},
29216 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
29217 arm_parse_float_abi
, NULL
},
29219 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
29220 arm_parse_eabi
, NULL
},
29222 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
29223 arm_parse_it_mode
, NULL
},
29224 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
29225 arm_ccs_mode
, NULL
},
/* Sentinel entry terminating the table.  */
29226 {NULL
, NULL
, 0, NULL
}
29230 md_parse_option (int c
, const char * arg
)
29232 struct arm_option_table
*opt
;
29233 const struct arm_legacy_option_table
*fopt
;
29234 struct arm_long_option_table
*lopt
;
29240 target_big_endian
= 1;
29246 target_big_endian
= 0;
29250 case OPTION_FIX_V4BX
:
29258 #endif /* OBJ_ELF */
29261 /* Listing option. Just ignore these, we don't support additional
29266 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
29268 if (c
== opt
->option
[0]
29269 && ((arg
== NULL
&& opt
->option
[1] == 0)
29270 || streq (arg
, opt
->option
+ 1)))
29272 /* If the option is deprecated, tell the user. */
29273 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
29274 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
29275 arg
? arg
: "", _(opt
->deprecated
));
29277 if (opt
->var
!= NULL
)
29278 *opt
->var
= opt
->value
;
29284 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
29286 if (c
== fopt
->option
[0]
29287 && ((arg
== NULL
&& fopt
->option
[1] == 0)
29288 || streq (arg
, fopt
->option
+ 1)))
29290 /* If the option is deprecated, tell the user. */
29291 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
29292 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
29293 arg
? arg
: "", _(fopt
->deprecated
));
29295 if (fopt
->var
!= NULL
)
29296 *fopt
->var
= &fopt
->value
;
29302 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
29304 /* These options are expected to have an argument. */
29305 if (c
== lopt
->option
[0]
29307 && strncmp (arg
, lopt
->option
+ 1,
29308 strlen (lopt
->option
+ 1)) == 0)
29310 /* If the option is deprecated, tell the user. */
29311 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
29312 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
29313 _(lopt
->deprecated
));
29315 /* Call the sup-option parser. */
29316 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
/* Print the ARM-specific assembler options to FP: first the short
   option table, then the long option table, then the hard-wired
   -EB/-EL/--fix-v4bx/--fdpic lines (conditionally compiled).  */
29327 md_show_usage (FILE * fp
)
29329 struct arm_option_table
*opt
;
29330 struct arm_long_option_table
*lopt
;
29332 fprintf (fp
, _(" ARM-specific assembler options:\n"));
/* Short options with help text from arm_opts.  */
29334 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
29335 if (opt
->help
!= NULL
)
29336 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
/* Long -m<...>= options from arm_long_opts.  */
29338 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
29339 if (lopt
->help
!= NULL
)
29340 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
/* Fixed usage lines; endianness options depend on the target config.  */
29344 -EB assemble code for a big-endian cpu\n"));
29349 -EL assemble code for a little-endian cpu\n"));
29353 --fix-v4bx Allow BX in ARMv4 code\n"));
/* FDPIC output is only meaningful for ELF targets.  */
29357 --fdpic generate an FDPIC object file\n"));
29358 #endif /* OBJ_ELF */
29366 arm_feature_set flags
;
29367 } cpu_arch_ver_table
;
29369 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
29370 chronologically for architectures, with an exception for ARMv6-M and
29371 ARMv6S-M due to legacy reasons. No new architecture should have a
29372 special case. This allows for build attribute selection results to be
29373 stable when new architectures are added. */
29374 static const cpu_arch_ver_table cpu_arch_ver
[] =
29376 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
29377 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
29378 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
29379 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
29380 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
29381 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
29382 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
29383 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
29384 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
29385 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
29386 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
29387 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
29388 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
29389 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
29390 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
29391 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
29392 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
29393 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
29394 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
29395 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
29396 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
29397 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
29398 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
29399 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
29401 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
29402 always selected build attributes to match those of ARMv6-M
29403 (resp. ARMv6S-M). However, due to these architectures being a strict
29404 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
29405 would be selected when fully respecting chronology of architectures.
29406 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
29407 move them before ARMv7 architectures. */
29408 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
29409 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
29411 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
29412 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
29413 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
29414 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
29415 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
29416 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
29417 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
29418 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
29419 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
29420 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
29421 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
29422 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
29423 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
29424 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
29425 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
29426 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
29427 {-1, ARM_ARCH_NONE
}
29430 /* Set an attribute if it has not already been set by the user. */
29433 aeabi_set_attribute_int (int tag
, int value
)
29436 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
29437 || !attributes_set_explicitly
[tag
])
29438 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
29442 aeabi_set_attribute_string (int tag
, const char *value
)
29445 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
29446 || !attributes_set_explicitly
[tag
])
29447 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
29450 /* Return whether features in the *NEEDED feature set are available via
29451 extensions for the architecture whose feature set is *ARCH_FSET. */
29454 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
29455 const arm_feature_set
*needed
)
29457 int i
, nb_allowed_archs
;
29458 arm_feature_set ext_fset
;
29459 const struct arm_option_extension_value_table
*opt
;
29461 ext_fset
= arm_arch_none
;
29462 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29464 /* Extension does not provide any feature we need. */
29465 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
29469 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
29470 for (i
= 0; i
< nb_allowed_archs
; i
++)
29473 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
29476 /* Extension is available, add it. */
29477 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
29478 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
29482 /* Can we enable all features in *needed? */
29483 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
29486 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
29487 a given architecture feature set *ARCH_EXT_FSET including extension feature
29488 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
29489 - if true, check for an exact match of the architecture modulo extensions;
29490 - otherwise, select build attribute value of the first superset
29491 architecture released so that results remains stable when new architectures
29493 For -march/-mcpu=all the build attribute value of the most featureful
29494 architecture is returned. Tag_CPU_arch_profile result is returned in
29498 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
29499 const arm_feature_set
*ext_fset
,
29500 char *profile
, int exact_match
)
29502 arm_feature_set arch_fset
;
29503 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
29505 /* Select most featureful architecture with all its extensions if building
29506 for -march=all as the feature sets used to set build attributes. */
29507 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
29509 /* Force revisiting of decision for each new architecture. */
29510 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29512 return TAG_CPU_ARCH_V8
;
29515 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
29517 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
29519 arm_feature_set known_arch_fset
;
29521 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
29524 /* Base architecture match user-specified architecture and
29525 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
29526 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
29531 /* Base architecture match user-specified architecture only
29532 (eg. ARMv6-M in the same case as above). Record it in case we
29533 find a match with above condition. */
29534 else if (p_ver_ret
== NULL
29535 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
29541 /* Architecture has all features wanted. */
29542 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
29544 arm_feature_set added_fset
;
29546 /* Compute features added by this architecture over the one
29547 recorded in p_ver_ret. */
29548 if (p_ver_ret
!= NULL
)
29549 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
29551 /* First architecture that match incl. with extensions, or the
29552 only difference in features over the recorded match is
29553 features that were optional and are now mandatory. */
29554 if (p_ver_ret
== NULL
29555 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
29561 else if (p_ver_ret
== NULL
)
29563 arm_feature_set needed_ext_fset
;
29565 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
29567 /* Architecture has all features needed when using some
29568 extensions. Record it and continue searching in case there
29569 exist an architecture providing all needed features without
29570 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
29572 if (have_ext_for_needed_feat_p (&known_arch_fset
,
29579 if (p_ver_ret
== NULL
)
29583 /* Tag_CPU_arch_profile. */
29584 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
29585 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
29586 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
29587 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
29589 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
29591 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
29595 return p_ver_ret
->val
;
29598 /* Set the public EABI object attributes. */
29601 aeabi_set_public_attributes (void)
29603 char profile
= '\0';
29606 int fp16_optional
= 0;
29607 int skip_exact_match
= 0;
29608 arm_feature_set flags
, flags_arch
, flags_ext
;
29610 /* Autodetection mode, choose the architecture based the instructions
29612 if (no_cpu_selected ())
29614 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
29616 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
29617 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
29619 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
29620 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
29622 /* Code run during relaxation relies on selected_cpu being set. */
29623 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29624 flags_ext
= arm_arch_none
;
29625 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
29626 selected_ext
= flags_ext
;
29627 selected_cpu
= flags
;
29629 /* Otherwise, choose the architecture based on the capabilities of the
29633 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
29634 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
29635 flags_ext
= selected_ext
;
29636 flags
= selected_cpu
;
29638 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
29640 /* Allow the user to override the reported architecture. */
29641 if (!ARM_FEATURE_ZERO (selected_object_arch
))
29643 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
29644 flags_ext
= arm_arch_none
;
29647 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
29649 /* When this function is run again after relaxation has happened there is no
29650 way to determine whether an architecture or CPU was specified by the user:
29651 - selected_cpu is set above for relaxation to work;
29652 - march_cpu_opt is not set if only -mcpu or .cpu is used;
29653 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
29654 Therefore, if not in -march=all case we first try an exact match and fall
29655 back to autodetection. */
29656 if (!skip_exact_match
)
29657 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
29659 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
29661 as_bad (_("no architecture contains all the instructions used\n"));
29663 /* Tag_CPU_name. */
29664 if (selected_cpu_name
[0])
29668 q
= selected_cpu_name
;
29669 if (strncmp (q
, "armv", 4) == 0)
29674 for (i
= 0; q
[i
]; i
++)
29675 q
[i
] = TOUPPER (q
[i
]);
29677 aeabi_set_attribute_string (Tag_CPU_name
, q
);
29680 /* Tag_CPU_arch. */
29681 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
29683 /* Tag_CPU_arch_profile. */
29684 if (profile
!= '\0')
29685 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
29687 /* Tag_DSP_extension. */
29688 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
29689 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
29691 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
29692 /* Tag_ARM_ISA_use. */
29693 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
29694 || ARM_FEATURE_ZERO (flags_arch
))
29695 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
29697 /* Tag_THUMB_ISA_use. */
29698 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
29699 || ARM_FEATURE_ZERO (flags_arch
))
29703 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29704 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
29706 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
29710 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
29713 /* Tag_VFP_arch. */
29714 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
29715 aeabi_set_attribute_int (Tag_VFP_arch
,
29716 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29718 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
29719 aeabi_set_attribute_int (Tag_VFP_arch
,
29720 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
29722 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
29725 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
29727 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
29729 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
29732 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
29733 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
29734 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
29735 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
29736 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
29738 /* Tag_ABI_HardFP_use. */
29739 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
29740 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
29741 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
29743 /* Tag_WMMX_arch. */
29744 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
29745 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
29746 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
29747 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
29749 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
29750 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
29751 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
29752 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
29753 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
29754 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
29756 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
29758 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
29762 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
29767 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
29768 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
29769 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
29770 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
29772 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
29773 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
29774 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
29778 We set Tag_DIV_use to two when integer divide instructions have been used
29779 in ARM state, or when Thumb integer divide instructions have been used,
29780 but we have no architecture profile set, nor have we any ARM instructions.
29782 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
29783 by the base architecture.
29785 For new architectures we will have to check these tests. */
29786 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
29787 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
29788 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
29789 aeabi_set_attribute_int (Tag_DIV_use
, 0);
29790 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
29791 || (profile
== '\0'
29792 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
29793 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
29794 aeabi_set_attribute_int (Tag_DIV_use
, 2);
29796 /* Tag_MP_extension_use. */
29797 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
29798 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
29800 /* Tag Virtualization_use. */
29801 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
29803 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
29806 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
29809 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
29810 finished and free extension feature bits which will not be used anymore. */
29813 arm_md_post_relax (void)
29815 aeabi_set_public_attributes ();
29816 XDELETE (mcpu_ext_opt
);
29817 mcpu_ext_opt
= NULL
;
29818 XDELETE (march_ext_opt
);
29819 march_ext_opt
= NULL
;
29822 /* Add the default contents for the .ARM.attributes section. */
29827 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
29830 aeabi_set_public_attributes ();
29832 #endif /* OBJ_ELF */
29834 /* Parse a .cpu directive. */
29837 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
29839 const struct arm_cpu_option_table
*opt
;
29843 name
= input_line_pointer
;
29844 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29845 input_line_pointer
++;
29846 saved_char
= *input_line_pointer
;
29847 *input_line_pointer
= 0;
29849 /* Skip the first "all" entry. */
29850 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
29851 if (streq (opt
->name
, name
))
29853 selected_arch
= opt
->value
;
29854 selected_ext
= opt
->ext
;
29855 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29856 if (opt
->canonical_name
)
29857 strcpy (selected_cpu_name
, opt
->canonical_name
);
29861 for (i
= 0; opt
->name
[i
]; i
++)
29862 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
29864 selected_cpu_name
[i
] = 0;
29866 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29868 *input_line_pointer
= saved_char
;
29869 demand_empty_rest_of_line ();
29872 as_bad (_("unknown cpu `%s'"), name
);
29873 *input_line_pointer
= saved_char
;
29874 ignore_rest_of_line ();
29877 /* Parse a .arch directive. */
29880 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
29882 const struct arm_arch_option_table
*opt
;
29886 name
= input_line_pointer
;
29887 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29888 input_line_pointer
++;
29889 saved_char
= *input_line_pointer
;
29890 *input_line_pointer
= 0;
29892 /* Skip the first "all" entry. */
29893 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29894 if (streq (opt
->name
, name
))
29896 selected_arch
= opt
->value
;
29897 selected_ext
= arm_arch_none
;
29898 selected_cpu
= selected_arch
;
29899 strcpy (selected_cpu_name
, opt
->name
);
29900 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29901 *input_line_pointer
= saved_char
;
29902 demand_empty_rest_of_line ();
29906 as_bad (_("unknown architecture `%s'\n"), name
);
29907 *input_line_pointer
= saved_char
;
29908 ignore_rest_of_line ();
29911 /* Parse a .object_arch directive. */
29914 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
29916 const struct arm_arch_option_table
*opt
;
29920 name
= input_line_pointer
;
29921 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29922 input_line_pointer
++;
29923 saved_char
= *input_line_pointer
;
29924 *input_line_pointer
= 0;
29926 /* Skip the first "all" entry. */
29927 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
29928 if (streq (opt
->name
, name
))
29930 selected_object_arch
= opt
->value
;
29931 *input_line_pointer
= saved_char
;
29932 demand_empty_rest_of_line ();
29936 as_bad (_("unknown architecture `%s'\n"), name
);
29937 *input_line_pointer
= saved_char
;
29938 ignore_rest_of_line ();
29941 /* Parse a .arch_extension directive. */
29944 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
29946 const struct arm_option_extension_value_table
*opt
;
29949 int adding_value
= 1;
29951 name
= input_line_pointer
;
29952 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
29953 input_line_pointer
++;
29954 saved_char
= *input_line_pointer
;
29955 *input_line_pointer
= 0;
29957 if (strlen (name
) >= 2
29958 && strncmp (name
, "no", 2) == 0)
29964 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
29965 if (streq (opt
->name
, name
))
29967 int i
, nb_allowed_archs
=
29968 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
29969 for (i
= 0; i
< nb_allowed_archs
; i
++)
29972 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
29974 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
29978 if (i
== nb_allowed_archs
)
29980 as_bad (_("architectural extension `%s' is not allowed for the "
29981 "current base architecture"), name
);
29986 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
29989 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
29991 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
29992 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
29993 *input_line_pointer
= saved_char
;
29994 demand_empty_rest_of_line ();
29995 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
29996 on this return so that duplicate extensions (extensions with the
29997 same name as a previous extension in the list) are not considered
29998 for command-line parsing. */
30002 if (opt
->name
== NULL
)
30003 as_bad (_("unknown architecture extension `%s'\n"), name
);
30005 *input_line_pointer
= saved_char
;
30006 ignore_rest_of_line ();
30009 /* Parse a .fpu directive. */
30012 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
30014 const struct arm_option_fpu_value_table
*opt
;
30018 name
= input_line_pointer
;
30019 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
30020 input_line_pointer
++;
30021 saved_char
= *input_line_pointer
;
30022 *input_line_pointer
= 0;
30024 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
30025 if (streq (opt
->name
, name
))
30027 selected_fpu
= opt
->value
;
30028 #ifndef CPU_DEFAULT
30029 if (no_cpu_selected ())
30030 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
30033 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
30034 *input_line_pointer
= saved_char
;
30035 demand_empty_rest_of_line ();
30039 as_bad (_("unknown floating point format `%s'\n"), name
);
30040 *input_line_pointer
= saved_char
;
30041 ignore_rest_of_line ();
30044 /* Copy symbol information. */
30047 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
30049 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
30053 /* Given a symbolic attribute NAME, return the proper integer value.
30054 Returns -1 if the attribute is not known. */
30057 arm_convert_symbolic_attribute (const char *name
)
30059 static const struct
30064 attribute_table
[] =
30066 /* When you modify this table you should
30067 also modify the list in doc/c-arm.texi. */
30068 #define T(tag) {#tag, tag}
30069 T (Tag_CPU_raw_name
),
30072 T (Tag_CPU_arch_profile
),
30073 T (Tag_ARM_ISA_use
),
30074 T (Tag_THUMB_ISA_use
),
30078 T (Tag_Advanced_SIMD_arch
),
30079 T (Tag_PCS_config
),
30080 T (Tag_ABI_PCS_R9_use
),
30081 T (Tag_ABI_PCS_RW_data
),
30082 T (Tag_ABI_PCS_RO_data
),
30083 T (Tag_ABI_PCS_GOT_use
),
30084 T (Tag_ABI_PCS_wchar_t
),
30085 T (Tag_ABI_FP_rounding
),
30086 T (Tag_ABI_FP_denormal
),
30087 T (Tag_ABI_FP_exceptions
),
30088 T (Tag_ABI_FP_user_exceptions
),
30089 T (Tag_ABI_FP_number_model
),
30090 T (Tag_ABI_align_needed
),
30091 T (Tag_ABI_align8_needed
),
30092 T (Tag_ABI_align_preserved
),
30093 T (Tag_ABI_align8_preserved
),
30094 T (Tag_ABI_enum_size
),
30095 T (Tag_ABI_HardFP_use
),
30096 T (Tag_ABI_VFP_args
),
30097 T (Tag_ABI_WMMX_args
),
30098 T (Tag_ABI_optimization_goals
),
30099 T (Tag_ABI_FP_optimization_goals
),
30100 T (Tag_compatibility
),
30101 T (Tag_CPU_unaligned_access
),
30102 T (Tag_FP_HP_extension
),
30103 T (Tag_VFP_HP_extension
),
30104 T (Tag_ABI_FP_16bit_format
),
30105 T (Tag_MPextension_use
),
30107 T (Tag_nodefaults
),
30108 T (Tag_also_compatible_with
),
30109 T (Tag_conformance
),
30111 T (Tag_Virtualization_use
),
30112 T (Tag_DSP_extension
),
30114 /* We deliberately do not include Tag_MPextension_use_legacy. */
30122 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
30123 if (streq (name
, attribute_table
[i
].name
))
30124 return attribute_table
[i
].tag
;
30129 /* Apply sym value for relocations only in the case that they are for
30130 local symbols in the same segment as the fixup and you have the
30131 respective architectural feature for blx and simple switches. */
30134 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
30137 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
30138 /* PR 17444: If the local symbol is in a different section then a reloc
30139 will always be generated for it, so applying the symbol value now
30140 will result in a double offset being stored in the relocation. */
30141 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
30142 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
30144 switch (fixP
->fx_r_type
)
30146 case BFD_RELOC_ARM_PCREL_BLX
:
30147 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
30148 if (ARM_IS_FUNC (fixP
->fx_addsy
))
30152 case BFD_RELOC_ARM_PCREL_CALL
:
30153 case BFD_RELOC_THUMB_PCREL_BLX
:
30154 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
30165 #endif /* OBJ_ELF */