1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2019 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
along with GAS; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
02110-1301, USA.  */
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
/* Must be at least the size of the largest unwind opcode (currently two).
   Presumably the growth granularity of the unwind opcode buffer; the
   consuming code is not visible in this chunk — confirm.  */
#define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS
* table_entry
;
53 symbolS
* personality_routine
;
54 int personality_index
;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes
;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset
;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored
:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS
,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result
;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
/* Return nonzero if the NUL-terminated strings A and B are equal.
   Both arguments are parenthesized (standard macro hygiene) and each
   is evaluated exactly once.  */
#define streq(a, b) (strcmp ((a), (b)) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
/* Current set of feature bits available (CPU+FPU).  Different from
   selected_cpu + selected_fpu in case of autodetection since the CPU
   feature bits are then all set.  */
static arm_feature_set cpu_variant;
/* Feature bits used in each execution state.  Used to set build attribute
   (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26	     = FALSE;
static int atpcs	     = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float   = FALSE;
static int pic_code	     = FALSE;
static int fix_v4bx	     = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Understand CodeComposer Studio assembly syntax.  */
bfd_boolean codecomposer_syntax = FALSE;
/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */

/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
   instead of -mcpu=arm1).  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

/* CPU, extension and FPU feature bits selected by -mcpu.
   Note the *_ext_opt pointers are non-const — the extension sets are
   presumably modified after option parsing; confirm against uses.  */
static const arm_feature_set *mcpu_cpu_opt = NULL;
static arm_feature_set *mcpu_ext_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;

/* CPU, extension and FPU feature bits selected by -march.  */
static const arm_feature_set *march_cpu_opt = NULL;
static arm_feature_set *march_ext_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;

/* Feature bits selected by -mfpu.  */
static const arm_feature_set *mfpu_opt = NULL;
/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

/* NOTE(review): CPU_DEFAULT is only defined for some targets, so this
   definition is presumably guarded by #ifdef CPU_DEFAULT in the full
   file — the guard is not visible in this chunk; confirm.  */
static const arm_feature_set cpu_default = CPU_DEFAULT;
/* Per-architecture-version feature-bit constants, used to gate which
   instructions a given instruction table entry is available for.  */
static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
static const arm_feature_set arm_ext_v6k_v6t2 =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
static const arm_feature_set arm_ext_v6_notm =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
static const arm_feature_set arm_ext_v6_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_barrier =
  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
static const arm_feature_set arm_ext_msr =
  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
/* All M-profile architecture bits (v6-M/v7-M in the low word,
   v8-M base/main in the high word).  */
static const arm_feature_set arm_ext_m =
  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
static const arm_feature_set arm_ext_v8m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v8_1m_main =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
/* Instructions in ARMv8-M only found in M profile architectures.  */
static const arm_feature_set arm_ext_v8m_m_only =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
static const arm_feature_set arm_ext_v6t2_v8m =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
/* Instructions shared between ARMv8-A and ARMv8-M.  */
static const arm_feature_set arm_ext_atomics =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
/* DSP instructions Tag_DSP_extension refers to.  */
static const arm_feature_set arm_ext_dsp =
  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
static const arm_feature_set arm_ext_ras =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
/* FP16 instructions.  */
static const arm_feature_set arm_ext_fp16 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
static const arm_feature_set arm_ext_fp16_fml =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
static const arm_feature_set arm_ext_v8_2 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
static const arm_feature_set arm_ext_v8_3 =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
static const arm_feature_set arm_ext_sb =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
static const arm_feature_set arm_ext_predres =
  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
/* Wildcard/sentinel architecture sets.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set fpu_any = FPU_ANY;
static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

/* Coprocessor extensions (iWMMXt, XScale, Cirrus Maverick).  */
static const arm_feature_set arm_cext_iwmmxt2 =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);

/* Floating point and SIMD extension feature sets.  */
static const arm_feature_set fpu_fpa_ext_v1 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 =
  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set mve_ext =
  ARM_FEATURE_COPROC (FPU_MVE);
static const arm_feature_set mve_fp_ext =
  ARM_FEATURE_COPROC (FPU_MVE_FP);
static const arm_feature_set fpu_vfp_fp16 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_vfp_ext_armv8xd =
  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
static const arm_feature_set fpu_neon_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
static const arm_feature_set crc_ext_armv8 =
  ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_v8_1 =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
static const arm_feature_set fpu_neon_ext_dotprod =
  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
/* Value of the -mfloat-abi option; -1 presumably means "not given" —
   confirm against option parsing later in the file.  */
static int mfloat_abi_opt = -1;

/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
   directive.  */
static arm_feature_set selected_arch = ARM_ARCH_NONE;

/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
   directive.  */
static arm_feature_set selected_ext = ARM_ARCH_NONE;

/* Feature bits selected by the last -mcpu/-march or by the combination of the
   last .cpu/.arch directive and the .arch_extension directives since that
   directive.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;

/* FPU feature bits selected by the last -mfpu or .fpu directive.  */
static arm_feature_set selected_fpu = FPU_NONE;

/* Feature bits selected by the last .object_arch directive.  */
static arm_feature_set selected_object_arch = ARM_ARCH_NONE;

/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];

extern FLONUM_TYPE generic_floating_point_number;
352 /* Return if no cpu was selected on command-line. */
354 no_cpu_selected (void)
356 return ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_none
);
361 static int meabi_flags
= EABI_DEFAULT
;
363 static int meabi_flags
= EF_ARM_EABI_UNKNOWN
;
366 static int attributes_set_explicitly
[NUM_KNOWN_OBJ_ATTRIBUTES
];
371 return (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
);
/* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
   can use to record whether thumb_mode has been copied into the
   tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)
390 /* Specifies the intrinsic IT insn behavior mode. */
391 enum implicit_it_mode
393 IMPLICIT_IT_MODE_NEVER
= 0x00,
394 IMPLICIT_IT_MODE_ARM
= 0x01,
395 IMPLICIT_IT_MODE_THUMB
= 0x02,
396 IMPLICIT_IT_MODE_ALWAYS
= (IMPLICIT_IT_MODE_ARM
| IMPLICIT_IT_MODE_THUMB
)
398 static int implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
400 /* If unified_syntax is true, we are processing the new unified
401 ARM/Thumb syntax. Important differences from the old ARM mode:
403 - Immediate operands do not require a # prefix.
404 - Conditional affixes always appear at the end of the
405 instruction. (For backward compatibility, those instructions
406 that formerly had them in the middle, continue to accept them
408 - The IT instruction may appear, and if it does is validated
409 against subsequent conditional affixes. It does not generate
412 Important differences from the old Thumb mode:
414 - Immediate operands do not require a # prefix.
415 - Most of the V6T2 instructions are only available in unified mode.
416 - The .N and .W suffixes are recognized and honored (it is an error
417 if they cannot be honored).
418 - All instructions set the flags if and only if they have an 's' affix.
419 - Conditional affixes may be used. They are validated against
420 preceding IT instructions. Unlike ARM mode, you cannot use a
421 conditional affix except in the scope of an IT instruction. */
423 static bfd_boolean unified_syntax
= FALSE
;
/* An immediate operand can start with #, and ld*, st*, pld operands
   can contain [ and ].  We need to tell APP not to elide whitespace
   before a [, which can appear as the first operand for pld.
   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
const char arm_symbol_chars[] = "#[]{}";
444 enum neon_el_type type
;
448 #define NEON_MAX_TYPE_ELS 4
452 struct neon_type_el el
[NEON_MAX_TYPE_ELS
];
456 enum it_instruction_type
461 IF_INSIDE_IT_LAST_INSN
, /* Either outside or inside;
462 if inside, should be the last one. */
463 NEUTRAL_IT_INSN
, /* This could be either inside or outside,
464 i.e. BKPT and NOP. */
465 IT_INSN
/* The IT insn has been parsed. */
/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6
/* Size of the per-instruction relocs[] array in struct arm_it.  */
#define ARM_IT_MAX_RELOCS 3
475 unsigned long instruction
;
479 /* "uncond_value" is set to the value in place of the conditional field in
480 unconditional versions of the instruction, or -1 if nothing is
483 struct neon_type vectype
;
484 /* This does not indicate an actual NEON instruction, only that
485 the mnemonic accepts neon-style type suffixes. */
487 /* Set to the opcode if the instruction needs relaxation.
488 Zero if the instruction is not relaxed. */
492 bfd_reloc_code_real_type type
;
495 } relocs
[ARM_IT_MAX_RELOCS
];
497 enum it_instruction_type it_insn_type
;
503 struct neon_type_el vectype
;
504 unsigned present
: 1; /* Operand present. */
505 unsigned isreg
: 1; /* Operand was a register. */
506 unsigned immisreg
: 1; /* .imm field is a second register. */
507 unsigned isscalar
: 1; /* Operand is a (Neon) scalar. */
508 unsigned immisalign
: 1; /* Immediate is an alignment specifier. */
509 unsigned immisfloat
: 1; /* Immediate was parsed as a float. */
510 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
511 instructions. This allows us to disambiguate ARM <-> vector insns. */
512 unsigned regisimm
: 1; /* 64-bit immediate, reg forms high 32 bits. */
513 unsigned isvec
: 1; /* Is a single, double or quad VFP/Neon reg. */
514 unsigned isquad
: 1; /* Operand is Neon quad-precision register. */
515 unsigned issingle
: 1; /* Operand is VFP single-precision register. */
516 unsigned hasreloc
: 1; /* Operand has relocation suffix. */
517 unsigned writeback
: 1; /* Operand has trailing ! */
518 unsigned preind
: 1; /* Preindexed address. */
519 unsigned postind
: 1; /* Postindexed address. */
520 unsigned negative
: 1; /* Index register was negated. */
521 unsigned shifted
: 1; /* Shift applied to operation. */
522 unsigned shift_kind
: 3; /* Shift operation (enum shift_kind). */
523 } operands
[ARM_IT_MAX_OPERANDS
];
/* Shared parsing/encoding state for the instruction currently being
   handled; fields such as inst.error and inst.cond are set by the
   operand parsers below.  */
static struct arm_it inst;

/* Number of entries in the fp_const / fp_values tables.  */
#define NUM_FLOAT_VALS 8
530 const char * fp_const
[] =
532 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
535 LITTLENUM_TYPE fp_values
[NUM_FLOAT_VALS
][MAX_LITTLENUMS
];
/* Instruction-encoding bit masks.  */
#define CP_T_X	 0x00008000
#define CP_T_Y	 0x00400000

#define CONDS_BIT	 0x00100000
#define LOAD_BIT	 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001
555 const char * template_name
;
559 #define COND_ALWAYS 0xE
563 const char * template_name
;
567 struct asm_barrier_opt
569 const char * template_name
;
571 const arm_feature_set arch
;
/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT   (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c	(1 << 16)
#define PSR_x	(1 << 17)
#define PSR_s	(1 << 18)
#define PSR_f	(1 << 19)
586 bfd_reloc_code_real_type reloc
;
591 VFP_REG_Sd
, VFP_REG_Sm
, VFP_REG_Sn
,
592 VFP_REG_Dd
, VFP_REG_Dm
, VFP_REG_Dn
597 VFP_LDSTMIA
, VFP_LDSTMDB
, VFP_LDSTMIAX
, VFP_LDSTMDBX
/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2
604 struct neon_typed_alias
606 unsigned char defined
;
608 struct neon_type_el eltype
;
611 /* ARM register categories. This includes coprocessor numbers and various
612 architecture extensions' registers. Each entry should have an error message
613 in reg_expected_msgs below. */
641 /* Structure for a hash table entry for a register.
642 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
643 information which states whether a vector type or index is specified (for a
644 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
650 unsigned char builtin
;
651 struct neon_typed_alias
* neon
;
654 /* Diagnostics used when we don't get a register of the expected type. */
655 const char * const reg_expected_msgs
[] =
657 [REG_TYPE_RN
] = N_("ARM register expected"),
658 [REG_TYPE_CP
] = N_("bad or missing co-processor number"),
659 [REG_TYPE_CN
] = N_("co-processor register expected"),
660 [REG_TYPE_FN
] = N_("FPA register expected"),
661 [REG_TYPE_VFS
] = N_("VFP single precision register expected"),
662 [REG_TYPE_VFD
] = N_("VFP/Neon double precision register expected"),
663 [REG_TYPE_NQ
] = N_("Neon quad precision register expected"),
664 [REG_TYPE_VFSD
] = N_("VFP single or double precision register expected"),
665 [REG_TYPE_NDQ
] = N_("Neon double or quad precision register expected"),
666 [REG_TYPE_NSD
] = N_("Neon single or double precision register expected"),
667 [REG_TYPE_NSDQ
] = N_("VFP single, double or Neon quad precision register"
669 [REG_TYPE_VFC
] = N_("VFP system register expected"),
670 [REG_TYPE_MVF
] = N_("Maverick MVF register expected"),
671 [REG_TYPE_MVD
] = N_("Maverick MVD register expected"),
672 [REG_TYPE_MVFX
] = N_("Maverick MVFX register expected"),
673 [REG_TYPE_MVDX
] = N_("Maverick MVDX register expected"),
674 [REG_TYPE_MVAX
] = N_("Maverick MVAX register expected"),
675 [REG_TYPE_DSPSC
] = N_("Maverick DSPSC register expected"),
676 [REG_TYPE_MMXWR
] = N_("iWMMXt data register expected"),
677 [REG_TYPE_MMXWC
] = N_("iWMMXt control register expected"),
678 [REG_TYPE_MMXWCG
] = N_("iWMMXt scalar register expected"),
679 [REG_TYPE_XSCALE
] = N_("XScale accumulator register expected"),
680 [REG_TYPE_RNB
] = N_("")
683 /* Some well known registers that we refer to directly elsewhere. */
689 /* ARM instructions take 4bytes in the object file, Thumb instructions
695 /* Basic string to match. */
696 const char * template_name
;
698 /* Parameters to instruction. */
699 unsigned int operands
[8];
701 /* Conditional tag - see opcode_lookup. */
702 unsigned int tag
: 4;
704 /* Basic instruction code. */
705 unsigned int avalue
: 28;
707 /* Thumb-format instruction code. */
710 /* Which architecture variant provides this instruction. */
711 const arm_feature_set
* avariant
;
712 const arm_feature_set
* tvariant
;
714 /* Function to call to encode instruction in ARM format. */
715 void (* aencode
) (void);
717 /* Function to call to encode instruction in Thumb format. */
718 void (* tencode
) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020
#define VLDR_VMOV_SAME	0x0040f000

#define T2_SUBS_PC_LR	0xf3de8f00

#define DATA_OP_SHIFT	21
#define SBIT_SHIFT	20

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21
#define T2_SBIT_SHIFT	 20

#define A_COND_MASK	   0xf0000000
#define A_PUSH_POP_OP_MASK 0x0fff0000

/* Opcodes for pushing/popping registers to/from the stack.  */
#define A1_OPCODE_PUSH	  0x092d0000
#define A2_OPCODE_PUSH	  0x052d0004
#define A2_OPCODE_POP	  0x049d0004

/* Codes to distinguish the arithmetic instructions.
   (Earlier opcodes in this table are not visible in this chunk.)  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic message macros.  Each expands to a translated string and is
   assigned to inst.error (or passed where a string is expected).
   Note: BAD_ADDR_MODE previously carried a stray trailing semicolon,
   which made the macro expand to "<string>;" — harmless when used as a
   full statement followed by ';', but broken in any expression context
   (ternary, function argument).  The semicolon is removed here so the
   macro behaves like all of its siblings.  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP		_("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE	_("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
/* Lookup tables keyed by mnemonic/operand text.  Names suggest opcode,
   condition, shift, PSR, v7-M PSR, register, relocation and barrier-option
   tables respectively; they are populated elsewhere in this file (the
   initialisation code is not visible in this chunk).  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;
/* Stuff needed to resolve the label ambiguity.  */
/* NOTE(review): purpose inferred from names — confirm against the uses
   later in the full file.  Presumably the most recent label on the
   current line, and whether that label names a Thumb function.  */
symbolS * last_label_seen;
static int label_is_thumb_function_name = FALSE;
887 /* Literal pool structure. Held on a per-section
888 and per-sub-section basis. */
890 #define MAX_LITERAL_POOL_SIZE 1024
891 typedef struct literal_pool
893 expressionS literals
[MAX_LITERAL_POOL_SIZE
];
894 unsigned int next_free_entry
;
900 struct dwarf2_line_info locs
[MAX_LITERAL_POOL_SIZE
];
902 struct literal_pool
* next
;
903 unsigned int alignment
;
906 /* Pointer to a linked list of literal pools. */
907 literal_pool
* list_of_pools
= NULL
;
909 typedef enum asmfunc_states
912 WAITING_ASMFUNC_NAME
,
916 static asmfunc_states asmfunc_state
= OUTSIDE_ASMFUNC
;
919 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
921 static struct current_it now_it
;
925 now_it_compatible (int cond
)
927 return (cond
& ~1) == (now_it
.cc
& ~1);
931 conditional_insn (void)
933 return inst
.cond
!= COND_ALWAYS
;
/* Forward declarations for the IT-block state machine; being static,
   the definitions must appear later in this translation unit.  */
static int in_it_block (void);
static int handle_it_state (void);
static void force_automatic_it_block_close (void);
static void it_fsm_post_encode (void);
944 #define set_it_insn_type(type) \
947 inst.it_insn_type = type; \
948 if (handle_it_state () == FAIL) \
953 #define set_it_insn_type_nonvoid(type, failret) \
956 inst.it_insn_type = type; \
957 if (handle_it_state () == FAIL) \
962 #define set_it_insn_type_last() \
965 if (inst.cond == COND_ALWAYS) \
966 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
968 set_it_insn_type (INSIDE_IT_LAST_INSN); \
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
1008 skip_past_char (char ** str
, char c
)
1010 /* PR gas/14987: Allow for whitespace before the expected character. */
1011 skip_whitespace (*str
);
/* Convenience wrapper: advance *STR past an optional whitespace+comma.  */
#define skip_past_comma(str) skip_past_char (str, ',')
1024 /* Arithmetic expressions (possibly involving symbols). */
1026 /* Return TRUE if anything in the expression is a bignum. */
1029 walk_no_bignums (symbolS
* sp
)
1031 if (symbol_get_value_expression (sp
)->X_op
== O_big
)
1034 if (symbol_get_value_expression (sp
)->X_add_symbol
)
1036 return (walk_no_bignums (symbol_get_value_expression (sp
)->X_add_symbol
)
1037 || (symbol_get_value_expression (sp
)->X_op_symbol
1038 && walk_no_bignums (symbol_get_value_expression (sp
)->X_op_symbol
)));
/* TRUE while my_get_expression is parsing (set before and cleared after
   the expression call); presumably consulted by md_operand — confirm.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX  0	/* No immediate prefix expected.  */
#define GE_IMM_PREFIX 1	/* A '#'/'$' immediate prefix is required.  */
#define GE_OPT_PREFIX 2	/* Immediate prefix is optional.  */
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1055 my_get_expression (expressionS
* ep
, char ** str
, int prefix_mode
)
1059 /* In unified syntax, all prefixes are optional. */
1061 prefix_mode
= (prefix_mode
== GE_OPT_PREFIX_BIG
) ? prefix_mode
1064 switch (prefix_mode
)
1066 case GE_NO_PREFIX
: break;
1068 if (!is_immediate_prefix (**str
))
1070 inst
.error
= _("immediate expression requires a # prefix");
1076 case GE_OPT_PREFIX_BIG
:
1077 if (is_immediate_prefix (**str
))
1084 memset (ep
, 0, sizeof (expressionS
));
1086 save_in
= input_line_pointer
;
1087 input_line_pointer
= *str
;
1088 in_my_get_expression
= TRUE
;
1090 in_my_get_expression
= FALSE
;
1092 if (ep
->X_op
== O_illegal
|| ep
->X_op
== O_absent
)
1094 /* We found a bad or missing expression in md_operand(). */
1095 *str
= input_line_pointer
;
1096 input_line_pointer
= save_in
;
1097 if (inst
.error
== NULL
)
1098 inst
.error
= (ep
->X_op
== O_absent
1099 ? _("missing expression") :_("bad expression"));
1103 /* Get rid of any bignums now, so that we don't generate an error for which
1104 we can't establish a line number later on. Big numbers are never valid
1105 in instructions, which is where this routine is always called. */
1106 if (prefix_mode
!= GE_OPT_PREFIX_BIG
1107 && (ep
->X_op
== O_big
1108 || (ep
->X_add_symbol
1109 && (walk_no_bignums (ep
->X_add_symbol
)
1111 && walk_no_bignums (ep
->X_op_symbol
))))))
1113 inst
.error
= _("invalid constant");
1114 *str
= input_line_pointer
;
1115 input_line_pointer
= save_in
;
1119 *str
= input_line_pointer
;
1120 input_line_pointer
= save_in
;
1124 /* Turn a string in input_line_pointer into a floating point constant
1125 of type TYPE, and store the appropriate bytes in *LITP. The number
1126 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1127 returned, or NULL on OK.
1129 Note that fp constants aren't represent in the normal way on the ARM.
1130 In big endian mode, things are as expected. However, in little endian
1131 mode fp constants are big-endian word-wise, and little-endian byte-wise
1132 within the words. For example, (double) 1.1 in big endian mode is
1133 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1134 the byte sequence 99 99 f1 3f 9a 99 99 99.
1136 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1139 md_atof (int type
, char * litP
, int * sizeP
)
1142 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
1174 return _("Unrecognized or unsupported floating point constant");
1177 t
= atof_ieee (input_line_pointer
, type
, words
);
1179 input_line_pointer
= t
;
1180 *sizeP
= prec
* sizeof (LITTLENUM_TYPE
);
1182 if (target_big_endian
)
1184 for (i
= 0; i
< prec
; i
++)
1186 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1187 litP
+= sizeof (LITTLENUM_TYPE
);
1192 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
1193 for (i
= prec
- 1; i
>= 0; i
--)
1195 md_number_to_chars (litP
, (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1196 litP
+= sizeof (LITTLENUM_TYPE
);
1199 /* For a 4 byte float the order of elements in `words' is 1 0.
1200 For an 8 byte float the order is 1 0 3 2. */
1201 for (i
= 0; i
< prec
; i
+= 2)
1203 md_number_to_chars (litP
, (valueT
) words
[i
+ 1],
1204 sizeof (LITTLENUM_TYPE
));
1205 md_number_to_chars (litP
+ sizeof (LITTLENUM_TYPE
),
1206 (valueT
) words
[i
], sizeof (LITTLENUM_TYPE
));
1207 litP
+= 2 * sizeof (LITTLENUM_TYPE
);
1214 /* We handle all bad expressions here, so that we can report the faulty
1215 instruction in the error message. */
1218 md_operand (expressionS
* exp
)
1220 if (in_my_get_expression
)
1221 exp
->X_op
= O_illegal
;
1224 /* Immediate values. */
1227 /* Generic immediate-value read function for use in directives.
1228 Accepts anything that 'expression' can fold to a constant.
1229 *val receives the number. */
1232 immediate_for_directive (int *val
)
1235 exp
.X_op
= O_illegal
;
1237 if (is_immediate_prefix (*input_line_pointer
))
1239 input_line_pointer
++;
1243 if (exp
.X_op
!= O_constant
)
1245 as_bad (_("expected #constant"));
1246 ignore_rest_of_line ();
1249 *val
= exp
.X_add_number
;
1254 /* Register parsing. */
1256 /* Generic register parser. CCP points to what should be the
1257 beginning of a register name. If it is indeed a valid register
1258 name, advance CCP over it and return the reg_entry structure;
1259 otherwise return NULL. Does not issue diagnostics. */
1261 static struct reg_entry
*
1262 arm_reg_parse_multi (char **ccp
)
1266 struct reg_entry
*reg
;
1268 skip_whitespace (start
);
1270 #ifdef REGISTER_PREFIX
1271 if (*start
!= REGISTER_PREFIX
)
1275 #ifdef OPTIONAL_REGISTER_PREFIX
1276 if (*start
== OPTIONAL_REGISTER_PREFIX
)
1281 if (!ISALPHA (*p
) || !is_name_beginner (*p
))
1286 while (ISALPHA (*p
) || ISDIGIT (*p
) || *p
== '_');
1288 reg
= (struct reg_entry
*) hash_find_n (arm_reg_hsh
, start
, p
- start
);
1298 arm_reg_alt_syntax (char **ccp
, char *start
, struct reg_entry
*reg
,
1299 enum arm_reg_type type
)
1301 /* Alternative syntaxes are accepted for a few register classes. */
1308 /* Generic coprocessor register names are allowed for these. */
1309 if (reg
&& reg
->type
== REG_TYPE_CN
)
1314 /* For backward compatibility, a bare number is valid here. */
1316 unsigned long processor
= strtoul (start
, ccp
, 10);
1317 if (*ccp
!= start
&& processor
<= 15)
1322 case REG_TYPE_MMXWC
:
1323 /* WC includes WCG. ??? I'm not sure this is true for all
1324 instructions that take WC registers. */
1325 if (reg
&& reg
->type
== REG_TYPE_MMXWCG
)
1336 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1337 return value is the register number or FAIL. */
1340 arm_reg_parse (char **ccp
, enum arm_reg_type type
)
1343 struct reg_entry
*reg
= arm_reg_parse_multi (ccp
);
1346 /* Do not allow a scalar (reg+index) to parse as a register. */
1347 if (reg
&& reg
->neon
&& (reg
->neon
->defined
& NTA_HASINDEX
))
1350 if (reg
&& reg
->type
== type
)
1353 if ((ret
= arm_reg_alt_syntax (ccp
, start
, reg
, type
)) != FAIL
)
1360 /* Parse a Neon type specifier. *STR should point at the leading '.'
1361 character. Does no verification at this stage that the type fits the opcode
1368 Can all be legally parsed by this function.
1370 Fills in neon_type struct pointer with parsed information, and updates STR
1371 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1372 type, FAIL if not. */
1375 parse_neon_type (struct neon_type
*type
, char **str
)
1382 while (type
->elems
< NEON_MAX_TYPE_ELS
)
1384 enum neon_el_type thistype
= NT_untyped
;
1385 unsigned thissize
= -1u;
1392 /* Just a size without an explicit type. */
1396 switch (TOLOWER (*ptr
))
1398 case 'i': thistype
= NT_integer
; break;
1399 case 'f': thistype
= NT_float
; break;
1400 case 'p': thistype
= NT_poly
; break;
1401 case 's': thistype
= NT_signed
; break;
1402 case 'u': thistype
= NT_unsigned
; break;
1404 thistype
= NT_float
;
1409 as_bad (_("unexpected character `%c' in type specifier"), *ptr
);
1415 /* .f is an abbreviation for .f32. */
1416 if (thistype
== NT_float
&& !ISDIGIT (*ptr
))
1421 thissize
= strtoul (ptr
, &ptr
, 10);
1423 if (thissize
!= 8 && thissize
!= 16 && thissize
!= 32
1426 as_bad (_("bad size %d in type specifier"), thissize
);
1434 type
->el
[type
->elems
].type
= thistype
;
1435 type
->el
[type
->elems
].size
= thissize
;
1440 /* Empty/missing type is not a successful parse. */
1441 if (type
->elems
== 0)
1449 /* Errors may be set multiple times during parsing or bit encoding
1450 (particularly in the Neon bits), but usually the earliest error which is set
1451 will be the most meaningful. Avoid overwriting it with later (cascading)
1452 errors by calling this function. */
1455 first_error (const char *err
)
1461 /* Parse a single type, e.g. ".s32", leading period included. */
1463 parse_neon_operand_type (struct neon_type_el
*vectype
, char **ccp
)
1466 struct neon_type optype
;
1470 if (parse_neon_type (&optype
, &str
) == SUCCESS
)
1472 if (optype
.elems
== 1)
1473 *vectype
= optype
.el
[0];
1476 first_error (_("only one type should be specified for operand"));
1482 first_error (_("vector type expected"));
/* Special meanings for indices (which have a range of 0-7), which will fit
   into a 4-bit field.  (NOTE(review): original comment was truncated in
   extraction — wording of the tail reconstructed from the values below.)  */
#define NEON_ALL_LANES		15
#define NEON_INTERLEAVE_LANES	14
1500 /* Parse either a register or a scalar, with an optional type. Return the
1501 register number, and optionally fill in the actual type of the register
1502 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1503 type/index information in *TYPEINFO. */
1506 parse_typed_reg_or_scalar (char **ccp
, enum arm_reg_type type
,
1507 enum arm_reg_type
*rtype
,
1508 struct neon_typed_alias
*typeinfo
)
1511 struct reg_entry
*reg
= arm_reg_parse_multi (&str
);
1512 struct neon_typed_alias atype
;
1513 struct neon_type_el parsetype
;
1517 atype
.eltype
.type
= NT_invtype
;
1518 atype
.eltype
.size
= -1;
1520 /* Try alternate syntax for some types of register. Note these are mutually
1521 exclusive with the Neon syntax extensions. */
1524 int altreg
= arm_reg_alt_syntax (&str
, *ccp
, reg
, type
);
1532 /* Undo polymorphism when a set of register types may be accepted. */
1533 if ((type
== REG_TYPE_NDQ
1534 && (reg
->type
== REG_TYPE_NQ
|| reg
->type
== REG_TYPE_VFD
))
1535 || (type
== REG_TYPE_VFSD
1536 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1537 || (type
== REG_TYPE_NSDQ
1538 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
1539 || reg
->type
== REG_TYPE_NQ
))
1540 || (type
== REG_TYPE_NSD
1541 && (reg
->type
== REG_TYPE_VFS
|| reg
->type
== REG_TYPE_VFD
))
1542 || (type
== REG_TYPE_MMXWC
1543 && (reg
->type
== REG_TYPE_MMXWCG
)))
1544 type
= (enum arm_reg_type
) reg
->type
;
1546 if (type
!= reg
->type
)
1552 if (parse_neon_operand_type (&parsetype
, &str
) == SUCCESS
)
1554 if ((atype
.defined
& NTA_HASTYPE
) != 0)
1556 first_error (_("can't redefine type for operand"));
1559 atype
.defined
|= NTA_HASTYPE
;
1560 atype
.eltype
= parsetype
;
1563 if (skip_past_char (&str
, '[') == SUCCESS
)
1565 if (type
!= REG_TYPE_VFD
1566 && !(type
== REG_TYPE_VFS
1567 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8_2
)))
1569 first_error (_("only D registers may be indexed"));
1573 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1575 first_error (_("can't change index for operand"));
1579 atype
.defined
|= NTA_HASINDEX
;
1581 if (skip_past_char (&str
, ']') == SUCCESS
)
1582 atype
.index
= NEON_ALL_LANES
;
1587 my_get_expression (&exp
, &str
, GE_NO_PREFIX
);
1589 if (exp
.X_op
!= O_constant
)
1591 first_error (_("constant expression required"));
1595 if (skip_past_char (&str
, ']') == FAIL
)
1598 atype
.index
= exp
.X_add_number
;
1613 /* Like arm_reg_parse, but also allow the following extra features:
1614 - If RTYPE is non-zero, return the (possibly restricted) type of the
1615 register (e.g. Neon double or quad reg when either has been requested).
1616 - If this is a Neon vector type with additional type information, fill
1617 in the struct pointed to by VECTYPE (if non-NULL).
1618 This function will fault on encountering a scalar. */
1621 arm_typed_reg_parse (char **ccp
, enum arm_reg_type type
,
1622 enum arm_reg_type
*rtype
, struct neon_type_el
*vectype
)
1624 struct neon_typed_alias atype
;
1626 int reg
= parse_typed_reg_or_scalar (&str
, type
, rtype
, &atype
);
1631 /* Do not allow regname(... to parse as a register. */
1635 /* Do not allow a scalar (reg+index) to parse as a register. */
1636 if ((atype
.defined
& NTA_HASINDEX
) != 0)
1638 first_error (_("register operand expected, but got scalar"));
1643 *vectype
= atype
.eltype
;
/* A parsed scalar is packed as (register << 4) | index; these macros
   unpack the two fields.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1653 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1654 have enough information to be able to do a good job bounds-checking. So, we
1655 just do easy checks here, and do further checks later. */
1658 parse_scalar (char **ccp
, int elsize
, struct neon_type_el
*type
)
1662 struct neon_typed_alias atype
;
1663 enum arm_reg_type reg_type
= REG_TYPE_VFD
;
1666 reg_type
= REG_TYPE_VFS
;
1668 reg
= parse_typed_reg_or_scalar (&str
, reg_type
, NULL
, &atype
);
1670 if (reg
== FAIL
|| (atype
.defined
& NTA_HASINDEX
) == 0)
1673 if (atype
.index
== NEON_ALL_LANES
)
1675 first_error (_("scalar must have an index"));
1678 else if (atype
.index
>= 64 / elsize
)
1680 first_error (_("scalar index out of range"));
1685 *type
= atype
.eltype
;
1689 return reg
* 16 + atype
.index
;
1692 /* Types of registers in a list. */
1705 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1708 parse_reg_list (char ** strp
, enum reg_list_els etype
)
1714 gas_assert (etype
== REGLIST_RN
|| etype
== REGLIST_CLRM
);
1716 /* We come back here if we get ranges concatenated by '+' or '|'. */
1719 skip_whitespace (str
);
1732 const char apsr_str
[] = "apsr";
1733 int apsr_str_len
= strlen (apsr_str
);
1735 reg
= arm_reg_parse (&str
, REGLIST_RN
);
1736 if (etype
== REGLIST_CLRM
)
1738 if (reg
== REG_SP
|| reg
== REG_PC
)
1740 else if (reg
== FAIL
1741 && !strncasecmp (str
, apsr_str
, apsr_str_len
)
1742 && !ISALPHA (*(str
+ apsr_str_len
)))
1745 str
+= apsr_str_len
;
1750 first_error (_("r0-r12, lr or APSR expected"));
1754 else /* etype == REGLIST_RN. */
1758 first_error (_(reg_expected_msgs
[REGLIST_RN
]));
1769 first_error (_("bad range in register list"));
1773 for (i
= cur_reg
+ 1; i
< reg
; i
++)
1775 if (range
& (1 << i
))
1777 (_("Warning: duplicated register (r%d) in register list"),
1785 if (range
& (1 << reg
))
1786 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1788 else if (reg
<= cur_reg
)
1789 as_tsktsk (_("Warning: register range not in ascending order"));
1794 while (skip_past_comma (&str
) != FAIL
1795 || (in_range
= 1, *str
++ == '-'));
1798 if (skip_past_char (&str
, '}') == FAIL
)
1800 first_error (_("missing `}'"));
1804 else if (etype
== REGLIST_RN
)
1808 if (my_get_expression (&exp
, &str
, GE_NO_PREFIX
))
1811 if (exp
.X_op
== O_constant
)
1813 if (exp
.X_add_number
1814 != (exp
.X_add_number
& 0x0000ffff))
1816 inst
.error
= _("invalid register mask");
1820 if ((range
& exp
.X_add_number
) != 0)
1822 int regno
= range
& exp
.X_add_number
;
1825 regno
= (1 << regno
) - 1;
1827 (_("Warning: duplicated register (r%d) in register list"),
1831 range
|= exp
.X_add_number
;
1835 if (inst
.relocs
[0].type
!= 0)
1837 inst
.error
= _("expression too complex");
1841 memcpy (&inst
.relocs
[0].exp
, &exp
, sizeof (expressionS
));
1842 inst
.relocs
[0].type
= BFD_RELOC_ARM_MULTI
;
1843 inst
.relocs
[0].pc_rel
= 0;
1847 if (*str
== '|' || *str
== '+')
1853 while (another_range
);
1859 /* Parse a VFP register list. If the string is invalid return FAIL.
1860 Otherwise return the number of registers, and set PBASE to the first
1861 register. Parses registers of type ETYPE.
1862 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1863 - Q registers can be used to specify pairs of D registers
1864 - { } can be omitted from around a singleton register list
1865 FIXME: This is not implemented, as it would require backtracking in
1868 This could be done (the meaning isn't really ambiguous), but doesn't
1869 fit in well with the current parsing framework.
1870 - 32 D registers may be used (also true for VFPv3).
1871 FIXME: Types are ignored in these register lists, which is probably a
1875 parse_vfp_reg_list (char **ccp
, unsigned int *pbase
, enum reg_list_els etype
,
1876 bfd_boolean
*partial_match
)
1881 enum arm_reg_type regtype
= (enum arm_reg_type
) 0;
1885 unsigned long mask
= 0;
1887 bfd_boolean vpr_seen
= FALSE
;
1888 bfd_boolean expect_vpr
=
1889 (etype
== REGLIST_VFP_S_VPR
) || (etype
== REGLIST_VFP_D_VPR
);
1891 if (skip_past_char (&str
, '{') == FAIL
)
1893 inst
.error
= _("expecting {");
1900 case REGLIST_VFP_S_VPR
:
1901 regtype
= REG_TYPE_VFS
;
1906 case REGLIST_VFP_D_VPR
:
1907 regtype
= REG_TYPE_VFD
;
1910 case REGLIST_NEON_D
:
1911 regtype
= REG_TYPE_NDQ
;
1918 if (etype
!= REGLIST_VFP_S
&& etype
!= REGLIST_VFP_S_VPR
)
1920 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1921 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
1925 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
1928 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
1935 base_reg
= max_regs
;
1936 *partial_match
= FALSE
;
1940 int setmask
= 1, addregs
= 1;
1941 const char vpr_str
[] = "vpr";
1942 int vpr_str_len
= strlen (vpr_str
);
1944 new_base
= arm_typed_reg_parse (&str
, regtype
, ®type
, NULL
);
1948 if (new_base
== FAIL
1949 && !strncasecmp (str
, vpr_str
, vpr_str_len
)
1950 && !ISALPHA (*(str
+ vpr_str_len
))
1956 base_reg
= 0; /* Canonicalize VPR only on d0 with 0 regs. */
1960 first_error (_("VPR expected last"));
1963 else if (new_base
== FAIL
)
1965 if (regtype
== REG_TYPE_VFS
)
1966 first_error (_("VFP single precision register or VPR "
1968 else /* regtype == REG_TYPE_VFD. */
1969 first_error (_("VFP/Neon double precision register or VPR "
1974 else if (new_base
== FAIL
)
1976 first_error (_(reg_expected_msgs
[regtype
]));
1980 *partial_match
= TRUE
;
1984 if (new_base
>= max_regs
)
1986 first_error (_("register out of range in list"));
1990 /* Note: a value of 2 * n is returned for the register Q<n>. */
1991 if (regtype
== REG_TYPE_NQ
)
1997 if (new_base
< base_reg
)
1998 base_reg
= new_base
;
2000 if (mask
& (setmask
<< new_base
))
2002 first_error (_("invalid register list"));
2006 if ((mask
>> new_base
) != 0 && ! warned
&& !vpr_seen
)
2008 as_tsktsk (_("register list not in ascending order"));
2012 mask
|= setmask
<< new_base
;
2015 if (*str
== '-') /* We have the start of a range expression */
2021 if ((high_range
= arm_typed_reg_parse (&str
, regtype
, NULL
, NULL
))
2024 inst
.error
= gettext (reg_expected_msgs
[regtype
]);
2028 if (high_range
>= max_regs
)
2030 first_error (_("register out of range in list"));
2034 if (regtype
== REG_TYPE_NQ
)
2035 high_range
= high_range
+ 1;
2037 if (high_range
<= new_base
)
2039 inst
.error
= _("register range not in ascending order");
2043 for (new_base
+= addregs
; new_base
<= high_range
; new_base
+= addregs
)
2045 if (mask
& (setmask
<< new_base
))
2047 inst
.error
= _("invalid register list");
2051 mask
|= setmask
<< new_base
;
2056 while (skip_past_comma (&str
) != FAIL
);
2060 /* Sanity check -- should have raised a parse error above. */
2061 if ((!vpr_seen
&& count
== 0) || count
> max_regs
)
2066 if (expect_vpr
&& !vpr_seen
)
2068 first_error (_("VPR expected last"));
2072 /* Final test -- the registers must be consecutive. */
2074 for (i
= 0; i
< count
; i
++)
2076 if ((mask
& (1u << i
)) == 0)
2078 inst
.error
= _("non-contiguous register range");
2088 /* True if two alias types are the same. */
2091 neon_alias_types_same (struct neon_typed_alias
*a
, struct neon_typed_alias
*b
)
2099 if (a
->defined
!= b
->defined
)
2102 if ((a
->defined
& NTA_HASTYPE
) != 0
2103 && (a
->eltype
.type
!= b
->eltype
.type
2104 || a
->eltype
.size
!= b
->eltype
.size
))
2107 if ((a
->defined
& NTA_HASINDEX
) != 0
2108 && (a
->index
!= b
->index
))
2114 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2115 The base register is put in *PBASE.
2116 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2118 The register stride (minus one) is put in bit 4 of the return value.
2119 Bits [6:5] encode the list length (minus one).
2120 The type of the list elements is put in *ELTYPE, if non-NULL. */
2122 #define NEON_LANE(X) ((X) & 0xf)
2123 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2124 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2127 parse_neon_el_struct_list (char **str
, unsigned *pbase
,
2128 struct neon_type_el
*eltype
)
2135 int leading_brace
= 0;
2136 enum arm_reg_type rtype
= REG_TYPE_NDQ
;
2137 const char *const incr_error
= _("register stride must be 1 or 2");
2138 const char *const type_error
= _("mismatched element/structure types in list");
2139 struct neon_typed_alias firsttype
;
2140 firsttype
.defined
= 0;
2141 firsttype
.eltype
.type
= NT_invtype
;
2142 firsttype
.eltype
.size
= -1;
2143 firsttype
.index
= -1;
2145 if (skip_past_char (&ptr
, '{') == SUCCESS
)
2150 struct neon_typed_alias atype
;
2151 int getreg
= parse_typed_reg_or_scalar (&ptr
, rtype
, &rtype
, &atype
);
2155 first_error (_(reg_expected_msgs
[rtype
]));
2162 if (rtype
== REG_TYPE_NQ
)
2168 else if (reg_incr
== -1)
2170 reg_incr
= getreg
- base_reg
;
2171 if (reg_incr
< 1 || reg_incr
> 2)
2173 first_error (_(incr_error
));
2177 else if (getreg
!= base_reg
+ reg_incr
* count
)
2179 first_error (_(incr_error
));
2183 if (! neon_alias_types_same (&atype
, &firsttype
))
2185 first_error (_(type_error
));
2189 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2193 struct neon_typed_alias htype
;
2194 int hireg
, dregs
= (rtype
== REG_TYPE_NQ
) ? 2 : 1;
2196 lane
= NEON_INTERLEAVE_LANES
;
2197 else if (lane
!= NEON_INTERLEAVE_LANES
)
2199 first_error (_(type_error
));
2204 else if (reg_incr
!= 1)
2206 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2210 hireg
= parse_typed_reg_or_scalar (&ptr
, rtype
, NULL
, &htype
);
2213 first_error (_(reg_expected_msgs
[rtype
]));
2216 if (! neon_alias_types_same (&htype
, &firsttype
))
2218 first_error (_(type_error
));
2221 count
+= hireg
+ dregs
- getreg
;
2225 /* If we're using Q registers, we can't use [] or [n] syntax. */
2226 if (rtype
== REG_TYPE_NQ
)
2232 if ((atype
.defined
& NTA_HASINDEX
) != 0)
2236 else if (lane
!= atype
.index
)
2238 first_error (_(type_error
));
2242 else if (lane
== -1)
2243 lane
= NEON_INTERLEAVE_LANES
;
2244 else if (lane
!= NEON_INTERLEAVE_LANES
)
2246 first_error (_(type_error
));
2251 while ((count
!= 1 || leading_brace
) && skip_past_comma (&ptr
) != FAIL
);
2253 /* No lane set by [x]. We must be interleaving structures. */
2255 lane
= NEON_INTERLEAVE_LANES
;
2258 if (lane
== -1 || base_reg
== -1 || count
< 1 || count
> 4
2259 || (count
> 1 && reg_incr
== -1))
2261 first_error (_("error parsing element/structure list"));
2265 if ((count
> 1 || leading_brace
) && skip_past_char (&ptr
, '}') == FAIL
)
2267 first_error (_("expected }"));
2275 *eltype
= firsttype
.eltype
;
2280 return lane
| ((reg_incr
- 1) << 4) | ((count
- 1) << 5);
2283 /* Parse an explicit relocation suffix on an expression. This is
2284 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2285 arm_reloc_hsh contains no entries, so this function can only
2286 succeed if there is no () after the word. Returns -1 on error,
2287 BFD_RELOC_UNUSED if there wasn't any suffix. */
2290 parse_reloc (char **str
)
2292 struct reloc_entry
*r
;
2296 return BFD_RELOC_UNUSED
;
2301 while (*q
&& *q
!= ')' && *q
!= ',')
2306 if ((r
= (struct reloc_entry
*)
2307 hash_find_n (arm_reloc_hsh
, p
, q
- p
)) == NULL
)
2314 /* Directives: register aliases. */
2316 static struct reg_entry
*
2317 insert_reg_alias (char *str
, unsigned number
, int type
)
2319 struct reg_entry
*new_reg
;
2322 if ((new_reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, str
)) != 0)
2324 if (new_reg
->builtin
)
2325 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str
);
2327 /* Only warn about a redefinition if it's not defined as the
2329 else if (new_reg
->number
!= number
|| new_reg
->type
!= type
)
2330 as_warn (_("ignoring redefinition of register alias '%s'"), str
);
2335 name
= xstrdup (str
);
2336 new_reg
= XNEW (struct reg_entry
);
2338 new_reg
->name
= name
;
2339 new_reg
->number
= number
;
2340 new_reg
->type
= type
;
2341 new_reg
->builtin
= FALSE
;
2342 new_reg
->neon
= NULL
;
2344 if (hash_insert (arm_reg_hsh
, name
, (void *) new_reg
))
2351 insert_neon_reg_alias (char *str
, int number
, int type
,
2352 struct neon_typed_alias
*atype
)
2354 struct reg_entry
*reg
= insert_reg_alias (str
, number
, type
);
2358 first_error (_("attempt to redefine typed alias"));
2364 reg
->neon
= XNEW (struct neon_typed_alias
);
2365 *reg
->neon
= *atype
;
2369 /* Look for the .req directive. This is of the form:
2371 new_register_name .req existing_register_name
2373 If we find one, or if it looks sufficiently like one that we want to
2374 handle any error here, return TRUE. Otherwise return FALSE. */
2377 create_register_alias (char * newname
, char *p
)
2379 struct reg_entry
*old
;
2380 char *oldname
, *nbuf
;
2383 /* The input scrubber ensures that whitespace after the mnemonic is
2384 collapsed to single spaces. */
2386 if (strncmp (oldname
, " .req ", 6) != 0)
2390 if (*oldname
== '\0')
2393 old
= (struct reg_entry
*) hash_find (arm_reg_hsh
, oldname
);
2396 as_warn (_("unknown register '%s' -- .req ignored"), oldname
);
2400 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2401 the desired alias name, and p points to its end. If not, then
2402 the desired alias name is in the global original_case_string. */
2403 #ifdef TC_CASE_SENSITIVE
2406 newname
= original_case_string
;
2407 nlen
= strlen (newname
);
2410 nbuf
= xmemdup0 (newname
, nlen
);
2412 /* Create aliases under the new name as stated; an all-lowercase
2413 version of the new name; and an all-uppercase version of the new
2415 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) != NULL
)
2417 for (p
= nbuf
; *p
; p
++)
2420 if (strncmp (nbuf
, newname
, nlen
))
2422 /* If this attempt to create an additional alias fails, do not bother
2423 trying to create the all-lower case alias. We will fail and issue
2424 a second, duplicate error message. This situation arises when the
2425 programmer does something like:
2428 The second .req creates the "Foo" alias but then fails to create
2429 the artificial FOO alias because it has already been created by the
2431 if (insert_reg_alias (nbuf
, old
->number
, old
->type
) == NULL
)
2438 for (p
= nbuf
; *p
; p
++)
2441 if (strncmp (nbuf
, newname
, nlen
))
2442 insert_reg_alias (nbuf
, old
->number
, old
->type
);
2449 /* Create a Neon typed/indexed register alias using directives, e.g.:
2454 These typed registers can be used instead of the types specified after the
2455 Neon mnemonic, so long as all operands given have types. Types can also be
2456 specified directly, e.g.:
2457 vadd d0.s32, d1.s32, d2.s32 */
2460 create_neon_reg_alias (char *newname
, char *p
)
2462 enum arm_reg_type basetype
;
2463 struct reg_entry
*basereg
;
2464 struct reg_entry mybasereg
;
2465 struct neon_type ntype
;
2466 struct neon_typed_alias typeinfo
;
2467 char *namebuf
, *nameend ATTRIBUTE_UNUSED
;
2470 typeinfo
.defined
= 0;
2471 typeinfo
.eltype
.type
= NT_invtype
;
2472 typeinfo
.eltype
.size
= -1;
2473 typeinfo
.index
= -1;
2477 if (strncmp (p
, " .dn ", 5) == 0)
2478 basetype
= REG_TYPE_VFD
;
2479 else if (strncmp (p
, " .qn ", 5) == 0)
2480 basetype
= REG_TYPE_NQ
;
2489 basereg
= arm_reg_parse_multi (&p
);
2491 if (basereg
&& basereg
->type
!= basetype
)
2493 as_bad (_("bad type for register"));
2497 if (basereg
== NULL
)
2500 /* Try parsing as an integer. */
2501 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2502 if (exp
.X_op
!= O_constant
)
2504 as_bad (_("expression must be constant"));
2507 basereg
= &mybasereg
;
2508 basereg
->number
= (basetype
== REG_TYPE_NQ
) ? exp
.X_add_number
* 2
2514 typeinfo
= *basereg
->neon
;
2516 if (parse_neon_type (&ntype
, &p
) == SUCCESS
)
2518 /* We got a type. */
2519 if (typeinfo
.defined
& NTA_HASTYPE
)
2521 as_bad (_("can't redefine the type of a register alias"));
2525 typeinfo
.defined
|= NTA_HASTYPE
;
2526 if (ntype
.elems
!= 1)
2528 as_bad (_("you must specify a single type only"));
2531 typeinfo
.eltype
= ntype
.el
[0];
2534 if (skip_past_char (&p
, '[') == SUCCESS
)
2537 /* We got a scalar index. */
2539 if (typeinfo
.defined
& NTA_HASINDEX
)
2541 as_bad (_("can't redefine the index of a scalar alias"));
2545 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
2547 if (exp
.X_op
!= O_constant
)
2549 as_bad (_("scalar index must be constant"));
2553 typeinfo
.defined
|= NTA_HASINDEX
;
2554 typeinfo
.index
= exp
.X_add_number
;
2556 if (skip_past_char (&p
, ']') == FAIL
)
2558 as_bad (_("expecting ]"));
2563 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2564 the desired alias name, and p points to its end. If not, then
2565 the desired alias name is in the global original_case_string. */
2566 #ifdef TC_CASE_SENSITIVE
2567 namelen
= nameend
- newname
;
2569 newname
= original_case_string
;
2570 namelen
= strlen (newname
);
2573 namebuf
= xmemdup0 (newname
, namelen
);
2575 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2576 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2578 /* Insert name in all uppercase. */
2579 for (p
= namebuf
; *p
; p
++)
2582 if (strncmp (namebuf
, newname
, namelen
))
2583 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2584 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2586 /* Insert name in all lowercase. */
2587 for (p
= namebuf
; *p
; p
++)
2590 if (strncmp (namebuf
, newname
, namelen
))
2591 insert_neon_reg_alias (namebuf
, basereg
->number
, basetype
,
2592 typeinfo
.defined
!= 0 ? &typeinfo
: NULL
);
2598 /* Should never be called, as .req goes between the alias and the
2599 register name, not at the beginning of the line. */
2602 s_req (int a ATTRIBUTE_UNUSED
)
2604 as_bad (_("invalid syntax for .req directive"));
2608 s_dn (int a ATTRIBUTE_UNUSED
)
2610 as_bad (_("invalid syntax for .dn directive"));
2614 s_qn (int a ATTRIBUTE_UNUSED
)
2616 as_bad (_("invalid syntax for .qn directive"));
2619 /* The .unreq directive deletes an alias which was previously defined
2620 by .req. For example:
2626 s_unreq (int a ATTRIBUTE_UNUSED
)
2631 name
= input_line_pointer
;
2633 while (*input_line_pointer
!= 0
2634 && *input_line_pointer
!= ' '
2635 && *input_line_pointer
!= '\n')
2636 ++input_line_pointer
;
2638 saved_char
= *input_line_pointer
;
2639 *input_line_pointer
= 0;
2642 as_bad (_("invalid syntax for .unreq directive"));
2645 struct reg_entry
*reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
,
2649 as_bad (_("unknown register alias '%s'"), name
);
2650 else if (reg
->builtin
)
2651 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2658 hash_delete (arm_reg_hsh
, name
, FALSE
);
2659 free ((char *) reg
->name
);
2664 /* Also locate the all upper case and all lower case versions.
2665 Do not complain if we cannot find one or the other as it
2666 was probably deleted above. */
2668 nbuf
= strdup (name
);
2669 for (p
= nbuf
; *p
; p
++)
2671 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2674 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2675 free ((char *) reg
->name
);
2681 for (p
= nbuf
; *p
; p
++)
2683 reg
= (struct reg_entry
*) hash_find (arm_reg_hsh
, nbuf
);
2686 hash_delete (arm_reg_hsh
, nbuf
, FALSE
);
2687 free ((char *) reg
->name
);
2697 *input_line_pointer
= saved_char
;
2698 demand_empty_rest_of_line ();
2701 /* Directives: Instruction set selection. */
2704 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2705 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2706 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2707 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2709 /* Create a new mapping symbol for the transition to STATE. */
2712 make_mapping_symbol (enum mstate state
, valueT value
, fragS
*frag
)
2715 const char * symname
;
2722 type
= BSF_NO_FLAGS
;
2726 type
= BSF_NO_FLAGS
;
2730 type
= BSF_NO_FLAGS
;
2736 symbolP
= symbol_new (symname
, now_seg
, value
, frag
);
2737 symbol_get_bfdsym (symbolP
)->flags
|= type
| BSF_LOCAL
;
2742 THUMB_SET_FUNC (symbolP
, 0);
2743 ARM_SET_THUMB (symbolP
, 0);
2744 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2748 THUMB_SET_FUNC (symbolP
, 1);
2749 ARM_SET_THUMB (symbolP
, 1);
2750 ARM_SET_INTERWORK (symbolP
, support_interwork
);
2758 /* Save the mapping symbols for future reference. Also check that
2759 we do not place two mapping symbols at the same offset within a
2760 frag. We'll handle overlap between frags in
2761 check_mapping_symbols.
2763 If .fill or other data filling directive generates zero sized data,
2764 the mapping symbol for the following code will have the same value
2765 as the one generated for the data filling directive. In this case,
2766 we replace the old symbol with the new one at the same address. */
2769 if (frag
->tc_frag_data
.first_map
!= NULL
)
2771 know (S_GET_VALUE (frag
->tc_frag_data
.first_map
) == 0);
2772 symbol_remove (frag
->tc_frag_data
.first_map
, &symbol_rootP
, &symbol_lastP
);
2774 frag
->tc_frag_data
.first_map
= symbolP
;
2776 if (frag
->tc_frag_data
.last_map
!= NULL
)
2778 know (S_GET_VALUE (frag
->tc_frag_data
.last_map
) <= S_GET_VALUE (symbolP
));
2779 if (S_GET_VALUE (frag
->tc_frag_data
.last_map
) == S_GET_VALUE (symbolP
))
2780 symbol_remove (frag
->tc_frag_data
.last_map
, &symbol_rootP
, &symbol_lastP
);
2782 frag
->tc_frag_data
.last_map
= symbolP
;
2785 /* We must sometimes convert a region marked as code to data during
2786 code alignment, if an odd number of bytes have to be padded. The
2787 code mapping symbol is pushed to an aligned address. */
2790 insert_data_mapping_symbol (enum mstate state
,
2791 valueT value
, fragS
*frag
, offsetT bytes
)
2793 /* If there was already a mapping symbol, remove it. */
2794 if (frag
->tc_frag_data
.last_map
!= NULL
2795 && S_GET_VALUE (frag
->tc_frag_data
.last_map
) == frag
->fr_address
+ value
)
2797 symbolS
*symp
= frag
->tc_frag_data
.last_map
;
2801 know (frag
->tc_frag_data
.first_map
== symp
);
2802 frag
->tc_frag_data
.first_map
= NULL
;
2804 frag
->tc_frag_data
.last_map
= NULL
;
2805 symbol_remove (symp
, &symbol_rootP
, &symbol_lastP
);
2808 make_mapping_symbol (MAP_DATA
, value
, frag
);
2809 make_mapping_symbol (state
, value
+ bytes
, frag
);
2812 static void mapping_state_2 (enum mstate state
, int max_chars
);
2814 /* Set the mapping state to STATE. Only call this when about to
2815 emit some STATE bytes to the file. */
2817 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2819 mapping_state (enum mstate state
)
2821 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2823 if (mapstate
== state
)
2824 /* The mapping symbol has already been emitted.
2825 There is nothing else to do. */
2828 if (state
== MAP_ARM
|| state
== MAP_THUMB
)
2830 All ARM instructions require 4-byte alignment.
2831 (Almost) all Thumb instructions require 2-byte alignment.
2833 When emitting instructions into any section, mark the section
2836 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2837 but themselves require 2-byte alignment; this applies to some
2838 PC- relative forms. However, these cases will involve implicit
2839 literal pool generation or an explicit .align >=2, both of
2840 which will cause the section to me marked with sufficient
2841 alignment. Thus, we don't handle those cases here. */
2842 record_alignment (now_seg
, state
== MAP_ARM
? 2 : 1);
2844 if (TRANSITION (MAP_UNDEFINED
, MAP_DATA
))
2845 /* This case will be evaluated later. */
2848 mapping_state_2 (state
, 0);
2851 /* Same as mapping_state, but MAX_CHARS bytes have already been
2852 allocated. Put the mapping symbol that far back. */
2855 mapping_state_2 (enum mstate state
, int max_chars
)
2857 enum mstate mapstate
= seg_info (now_seg
)->tc_segment_info_data
.mapstate
;
2859 if (!SEG_NORMAL (now_seg
))
2862 if (mapstate
== state
)
2863 /* The mapping symbol has already been emitted.
2864 There is nothing else to do. */
2867 if (TRANSITION (MAP_UNDEFINED
, MAP_ARM
)
2868 || TRANSITION (MAP_UNDEFINED
, MAP_THUMB
))
2870 struct frag
* const frag_first
= seg_info (now_seg
)->frchainP
->frch_root
;
2871 const int add_symbol
= (frag_now
!= frag_first
) || (frag_now_fix () > 0);
2874 make_mapping_symbol (MAP_DATA
, (valueT
) 0, frag_first
);
2877 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= state
;
2878 make_mapping_symbol (state
, (valueT
) frag_now_fix () - max_chars
, frag_now
);
2882 #define mapping_state(x) ((void)0)
2883 #define mapping_state_2(x, y) ((void)0)
2886 /* Find the real, Thumb encoded start of a Thumb function. */
2890 find_real_start (symbolS
* symbolP
)
2893 const char * name
= S_GET_NAME (symbolP
);
2894 symbolS
* new_target
;
2896 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2897 #define STUB_NAME ".real_start_of"
2902 /* The compiler may generate BL instructions to local labels because
2903 it needs to perform a branch to a far away location. These labels
2904 do not have a corresponding ".real_start_of" label. We check
2905 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2906 the ".real_start_of" convention for nonlocal branches. */
2907 if (S_IS_LOCAL (symbolP
) || name
[0] == '.')
2910 real_start
= concat (STUB_NAME
, name
, NULL
);
2911 new_target
= symbol_find (real_start
);
2914 if (new_target
== NULL
)
2916 as_warn (_("Failed to find real start of function: %s\n"), name
);
2917 new_target
= symbolP
;
2925 opcode_select (int width
)
2932 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
2933 as_bad (_("selected processor does not support THUMB opcodes"));
2936 /* No need to force the alignment, since we will have been
2937 coming from ARM mode, which is word-aligned. */
2938 record_alignment (now_seg
, 1);
2945 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
2946 as_bad (_("selected processor does not support ARM opcodes"));
2951 frag_align (2, 0, 0);
2953 record_alignment (now_seg
, 1);
2958 as_bad (_("invalid instruction size selected (%d)"), width
);
2963 s_arm (int ignore ATTRIBUTE_UNUSED
)
2966 demand_empty_rest_of_line ();
2970 s_thumb (int ignore ATTRIBUTE_UNUSED
)
2973 demand_empty_rest_of_line ();
2977 s_code (int unused ATTRIBUTE_UNUSED
)
2981 temp
= get_absolute_expression ();
2986 opcode_select (temp
);
2990 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp
);
2995 s_force_thumb (int ignore ATTRIBUTE_UNUSED
)
2997 /* If we are not already in thumb mode go into it, EVEN if
2998 the target processor does not support thumb instructions.
2999 This is used by gcc/config/arm/lib1funcs.asm for example
3000 to compile interworking support functions even if the
3001 target processor should not support interworking. */
3005 record_alignment (now_seg
, 1);
3008 demand_empty_rest_of_line ();
3012 s_thumb_func (int ignore ATTRIBUTE_UNUSED
)
3016 /* The following label is the name/address of the start of a Thumb function.
3017 We need to know this for the interworking support. */
3018 label_is_thumb_function_name
= TRUE
;
3021 /* Perform a .set directive, but also mark the alias as
3022 being a thumb function. */
3025 s_thumb_set (int equiv
)
3027 /* XXX the following is a duplicate of the code for s_set() in read.c
3028 We cannot just call that code as we need to get at the symbol that
3035 /* Especial apologies for the random logic:
3036 This just grew, and could be parsed much more simply!
3038 delim
= get_symbol_name (& name
);
3039 end_name
= input_line_pointer
;
3040 (void) restore_line_pointer (delim
);
3042 if (*input_line_pointer
!= ',')
3045 as_bad (_("expected comma after name \"%s\""), name
);
3047 ignore_rest_of_line ();
3051 input_line_pointer
++;
3054 if (name
[0] == '.' && name
[1] == '\0')
3056 /* XXX - this should not happen to .thumb_set. */
3060 if ((symbolP
= symbol_find (name
)) == NULL
3061 && (symbolP
= md_undefined_symbol (name
)) == NULL
)
3064 /* When doing symbol listings, play games with dummy fragments living
3065 outside the normal fragment chain to record the file and line info
3067 if (listing
& LISTING_SYMBOLS
)
3069 extern struct list_info_struct
* listing_tail
;
3070 fragS
* dummy_frag
= (fragS
* ) xmalloc (sizeof (fragS
));
3072 memset (dummy_frag
, 0, sizeof (fragS
));
3073 dummy_frag
->fr_type
= rs_fill
;
3074 dummy_frag
->line
= listing_tail
;
3075 symbolP
= symbol_new (name
, undefined_section
, 0, dummy_frag
);
3076 dummy_frag
->fr_symbol
= symbolP
;
3080 symbolP
= symbol_new (name
, undefined_section
, 0, &zero_address_frag
);
3083 /* "set" symbols are local unless otherwise specified. */
3084 SF_SET_LOCAL (symbolP
);
3085 #endif /* OBJ_COFF */
3086 } /* Make a new symbol. */
3088 symbol_table_insert (symbolP
);
3093 && S_IS_DEFINED (symbolP
)
3094 && S_GET_SEGMENT (symbolP
) != reg_section
)
3095 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP
));
3097 pseudo_set (symbolP
);
3099 demand_empty_rest_of_line ();
3101 /* XXX Now we come to the Thumb specific bit of code. */
3103 THUMB_SET_FUNC (symbolP
, 1);
3104 ARM_SET_THUMB (symbolP
, 1);
3105 #if defined OBJ_ELF || defined OBJ_COFF
3106 ARM_SET_INTERWORK (symbolP
, support_interwork
);
3110 /* Directives: Mode selection. */
3112 /* .syntax [unified|divided] - choose the new unified syntax
3113 (same for Arm and Thumb encoding, modulo slight differences in what
3114 can be represented) or the old divergent syntax for each mode. */
3116 s_syntax (int unused ATTRIBUTE_UNUSED
)
3120 delim
= get_symbol_name (& name
);
3122 if (!strcasecmp (name
, "unified"))
3123 unified_syntax
= TRUE
;
3124 else if (!strcasecmp (name
, "divided"))
3125 unified_syntax
= FALSE
;
3128 as_bad (_("unrecognized syntax mode \"%s\""), name
);
3131 (void) restore_line_pointer (delim
);
3132 demand_empty_rest_of_line ();
3135 /* Directives: sectioning and alignment. */
3138 s_bss (int ignore ATTRIBUTE_UNUSED
)
3140 /* We don't support putting frags in the BSS segment, we fake it by
3141 marking in_bss, then looking at s_skip for clues. */
3142 subseg_set (bss_section
, 0);
3143 demand_empty_rest_of_line ();
3145 #ifdef md_elf_section_change_hook
3146 md_elf_section_change_hook ();
3151 s_even (int ignore ATTRIBUTE_UNUSED
)
3153 /* Never make frag if expect extra pass. */
3155 frag_align (1, 0, 0);
3157 record_alignment (now_seg
, 1);
3159 demand_empty_rest_of_line ();
3162 /* Directives: CodeComposer Studio. */
3164 /* .ref (for CodeComposer Studio syntax only). */
3166 s_ccs_ref (int unused ATTRIBUTE_UNUSED
)
3168 if (codecomposer_syntax
)
3169 ignore_rest_of_line ();
3171 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3174 /* If name is not NULL, then it is used for marking the beginning of a
3175 function, whereas if it is NULL then it means the function end. */
3177 asmfunc_debug (const char * name
)
3179 static const char * last_name
= NULL
;
3183 gas_assert (last_name
== NULL
);
3186 if (debug_type
== DEBUG_STABS
)
3187 stabs_generate_asm_func (name
, name
);
3191 gas_assert (last_name
!= NULL
);
3193 if (debug_type
== DEBUG_STABS
)
3194 stabs_generate_asm_endfunc (last_name
, last_name
);
3201 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED
)
3203 if (codecomposer_syntax
)
3205 switch (asmfunc_state
)
3207 case OUTSIDE_ASMFUNC
:
3208 asmfunc_state
= WAITING_ASMFUNC_NAME
;
3211 case WAITING_ASMFUNC_NAME
:
3212 as_bad (_(".asmfunc repeated."));
3215 case WAITING_ENDASMFUNC
:
3216 as_bad (_(".asmfunc without function."));
3219 demand_empty_rest_of_line ();
3222 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3226 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED
)
3228 if (codecomposer_syntax
)
3230 switch (asmfunc_state
)
3232 case OUTSIDE_ASMFUNC
:
3233 as_bad (_(".endasmfunc without a .asmfunc."));
3236 case WAITING_ASMFUNC_NAME
:
3237 as_bad (_(".endasmfunc without function."));
3240 case WAITING_ENDASMFUNC
:
3241 asmfunc_state
= OUTSIDE_ASMFUNC
;
3242 asmfunc_debug (NULL
);
3245 demand_empty_rest_of_line ();
3248 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3252 s_ccs_def (int name
)
3254 if (codecomposer_syntax
)
3257 as_bad (_(".def pseudo-op only available with -mccs flag."));
3260 /* Directives: Literal pools. */
3262 static literal_pool
*
3263 find_literal_pool (void)
3265 literal_pool
* pool
;
3267 for (pool
= list_of_pools
; pool
!= NULL
; pool
= pool
->next
)
3269 if (pool
->section
== now_seg
3270 && pool
->sub_section
== now_subseg
)
3277 static literal_pool
*
3278 find_or_make_literal_pool (void)
3280 /* Next literal pool ID number. */
3281 static unsigned int latest_pool_num
= 1;
3282 literal_pool
* pool
;
3284 pool
= find_literal_pool ();
3288 /* Create a new pool. */
3289 pool
= XNEW (literal_pool
);
3293 pool
->next_free_entry
= 0;
3294 pool
->section
= now_seg
;
3295 pool
->sub_section
= now_subseg
;
3296 pool
->next
= list_of_pools
;
3297 pool
->symbol
= NULL
;
3298 pool
->alignment
= 2;
3300 /* Add it to the list. */
3301 list_of_pools
= pool
;
3304 /* New pools, and emptied pools, will have a NULL symbol. */
3305 if (pool
->symbol
== NULL
)
3307 pool
->symbol
= symbol_create (FAKE_LABEL_NAME
, undefined_section
,
3308 (valueT
) 0, &zero_address_frag
);
3309 pool
->id
= latest_pool_num
++;
3316 /* Add the literal in the global 'inst'
3317 structure to the relevant literal pool. */
3320 add_to_lit_pool (unsigned int nbytes
)
3322 #define PADDING_SLOT 0x1
3323 #define LIT_ENTRY_SIZE_MASK 0xFF
3324 literal_pool
* pool
;
3325 unsigned int entry
, pool_size
= 0;
3326 bfd_boolean padding_slot_p
= FALSE
;
3332 imm1
= inst
.operands
[1].imm
;
3333 imm2
= (inst
.operands
[1].regisimm
? inst
.operands
[1].reg
3334 : inst
.relocs
[0].exp
.X_unsigned
? 0
3335 : ((bfd_int64_t
) inst
.operands
[1].imm
) >> 32);
3336 if (target_big_endian
)
3339 imm2
= inst
.operands
[1].imm
;
3343 pool
= find_or_make_literal_pool ();
3345 /* Check if this literal value is already in the pool. */
3346 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3350 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3351 && (inst
.relocs
[0].exp
.X_op
== O_constant
)
3352 && (pool
->literals
[entry
].X_add_number
3353 == inst
.relocs
[0].exp
.X_add_number
)
3354 && (pool
->literals
[entry
].X_md
== nbytes
)
3355 && (pool
->literals
[entry
].X_unsigned
3356 == inst
.relocs
[0].exp
.X_unsigned
))
3359 if ((pool
->literals
[entry
].X_op
== inst
.relocs
[0].exp
.X_op
)
3360 && (inst
.relocs
[0].exp
.X_op
== O_symbol
)
3361 && (pool
->literals
[entry
].X_add_number
3362 == inst
.relocs
[0].exp
.X_add_number
)
3363 && (pool
->literals
[entry
].X_add_symbol
3364 == inst
.relocs
[0].exp
.X_add_symbol
)
3365 && (pool
->literals
[entry
].X_op_symbol
3366 == inst
.relocs
[0].exp
.X_op_symbol
)
3367 && (pool
->literals
[entry
].X_md
== nbytes
))
3370 else if ((nbytes
== 8)
3371 && !(pool_size
& 0x7)
3372 && ((entry
+ 1) != pool
->next_free_entry
)
3373 && (pool
->literals
[entry
].X_op
== O_constant
)
3374 && (pool
->literals
[entry
].X_add_number
== (offsetT
) imm1
)
3375 && (pool
->literals
[entry
].X_unsigned
3376 == inst
.relocs
[0].exp
.X_unsigned
)
3377 && (pool
->literals
[entry
+ 1].X_op
== O_constant
)
3378 && (pool
->literals
[entry
+ 1].X_add_number
== (offsetT
) imm2
)
3379 && (pool
->literals
[entry
+ 1].X_unsigned
3380 == inst
.relocs
[0].exp
.X_unsigned
))
3383 padding_slot_p
= ((pool
->literals
[entry
].X_md
>> 8) == PADDING_SLOT
);
3384 if (padding_slot_p
&& (nbytes
== 4))
3390 /* Do we need to create a new entry? */
3391 if (entry
== pool
->next_free_entry
)
3393 if (entry
>= MAX_LITERAL_POOL_SIZE
)
3395 inst
.error
= _("literal pool overflow");
3401 /* For 8-byte entries, we align to an 8-byte boundary,
3402 and split it into two 4-byte entries, because on 32-bit
3403 host, 8-byte constants are treated as big num, thus
3404 saved in "generic_bignum" which will be overwritten
3405 by later assignments.
3407 We also need to make sure there is enough space for
3410 We also check to make sure the literal operand is a
3412 if (!(inst
.relocs
[0].exp
.X_op
== O_constant
3413 || inst
.relocs
[0].exp
.X_op
== O_big
))
3415 inst
.error
= _("invalid type for literal pool");
3418 else if (pool_size
& 0x7)
3420 if ((entry
+ 2) >= MAX_LITERAL_POOL_SIZE
)
3422 inst
.error
= _("literal pool overflow");
3426 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3427 pool
->literals
[entry
].X_op
= O_constant
;
3428 pool
->literals
[entry
].X_add_number
= 0;
3429 pool
->literals
[entry
++].X_md
= (PADDING_SLOT
<< 8) | 4;
3430 pool
->next_free_entry
+= 1;
3433 else if ((entry
+ 1) >= MAX_LITERAL_POOL_SIZE
)
3435 inst
.error
= _("literal pool overflow");
3439 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3440 pool
->literals
[entry
].X_op
= O_constant
;
3441 pool
->literals
[entry
].X_add_number
= imm1
;
3442 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3443 pool
->literals
[entry
++].X_md
= 4;
3444 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3445 pool
->literals
[entry
].X_op
= O_constant
;
3446 pool
->literals
[entry
].X_add_number
= imm2
;
3447 pool
->literals
[entry
].X_unsigned
= inst
.relocs
[0].exp
.X_unsigned
;
3448 pool
->literals
[entry
].X_md
= 4;
3449 pool
->alignment
= 3;
3450 pool
->next_free_entry
+= 1;
3454 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3455 pool
->literals
[entry
].X_md
= 4;
3459 /* PR ld/12974: Record the location of the first source line to reference
3460 this entry in the literal pool. If it turns out during linking that the
3461 symbol does not exist we will be able to give an accurate line number for
3462 the (first use of the) missing reference. */
3463 if (debug_type
== DEBUG_DWARF2
)
3464 dwarf2_where (pool
->locs
+ entry
);
3466 pool
->next_free_entry
+= 1;
3468 else if (padding_slot_p
)
3470 pool
->literals
[entry
] = inst
.relocs
[0].exp
;
3471 pool
->literals
[entry
].X_md
= nbytes
;
3474 inst
.relocs
[0].exp
.X_op
= O_symbol
;
3475 inst
.relocs
[0].exp
.X_add_number
= pool_size
;
3476 inst
.relocs
[0].exp
.X_add_symbol
= pool
->symbol
;
3482 tc_start_label_without_colon (void)
3484 bfd_boolean ret
= TRUE
;
3486 if (codecomposer_syntax
&& asmfunc_state
== WAITING_ASMFUNC_NAME
)
3488 const char *label
= input_line_pointer
;
3490 while (!is_end_of_line
[(int) label
[-1]])
3495 as_bad (_("Invalid label '%s'"), label
);
3499 asmfunc_debug (label
);
3501 asmfunc_state
= WAITING_ENDASMFUNC
;
3507 /* Can't use symbol_new here, so have to create a symbol and then at
3508 a later date assign it a value. That's what these functions do. */
3511 symbol_locate (symbolS
* symbolP
,
3512 const char * name
, /* It is copied, the caller can modify. */
3513 segT segment
, /* Segment identifier (SEG_<something>). */
3514 valueT valu
, /* Symbol value. */
3515 fragS
* frag
) /* Associated fragment. */
3518 char * preserved_copy_of_name
;
3520 name_length
= strlen (name
) + 1; /* +1 for \0. */
3521 obstack_grow (¬es
, name
, name_length
);
3522 preserved_copy_of_name
= (char *) obstack_finish (¬es
);
3524 #ifdef tc_canonicalize_symbol_name
3525 preserved_copy_of_name
=
3526 tc_canonicalize_symbol_name (preserved_copy_of_name
);
3529 S_SET_NAME (symbolP
, preserved_copy_of_name
);
3531 S_SET_SEGMENT (symbolP
, segment
);
3532 S_SET_VALUE (symbolP
, valu
);
3533 symbol_clear_list_pointers (symbolP
);
3535 symbol_set_frag (symbolP
, frag
);
3537 /* Link to end of symbol chain. */
3539 extern int symbol_table_frozen
;
3541 if (symbol_table_frozen
)
3545 symbol_append (symbolP
, symbol_lastP
, & symbol_rootP
, & symbol_lastP
);
3547 obj_symbol_new_hook (symbolP
);
3549 #ifdef tc_symbol_new_hook
3550 tc_symbol_new_hook (symbolP
);
3554 verify_symbol_chain (symbol_rootP
, symbol_lastP
);
3555 #endif /* DEBUG_SYMS */
3559 s_ltorg (int ignored ATTRIBUTE_UNUSED
)
3562 literal_pool
* pool
;
3565 pool
= find_literal_pool ();
3567 || pool
->symbol
== NULL
3568 || pool
->next_free_entry
== 0)
3571 /* Align pool as you have word accesses.
3572 Only make a frag if we have to. */
3574 frag_align (pool
->alignment
, 0, 0);
3576 record_alignment (now_seg
, 2);
3579 seg_info (now_seg
)->tc_segment_info_data
.mapstate
= MAP_DATA
;
3580 make_mapping_symbol (MAP_DATA
, (valueT
) frag_now_fix (), frag_now
);
3582 sprintf (sym_name
, "$$lit_\002%x", pool
->id
);
3584 symbol_locate (pool
->symbol
, sym_name
, now_seg
,
3585 (valueT
) frag_now_fix (), frag_now
);
3586 symbol_table_insert (pool
->symbol
);
3588 ARM_SET_THUMB (pool
->symbol
, thumb_mode
);
3590 #if defined OBJ_COFF || defined OBJ_ELF
3591 ARM_SET_INTERWORK (pool
->symbol
, support_interwork
);
3594 for (entry
= 0; entry
< pool
->next_free_entry
; entry
++)
3597 if (debug_type
== DEBUG_DWARF2
)
3598 dwarf2_gen_line_info (frag_now_fix (), pool
->locs
+ entry
);
3600 /* First output the expression in the instruction to the pool. */
3601 emit_expr (&(pool
->literals
[entry
]),
3602 pool
->literals
[entry
].X_md
& LIT_ENTRY_SIZE_MASK
);
3605 /* Mark the pool as empty. */
3606 pool
->next_free_entry
= 0;
3607 pool
->symbol
= NULL
;
3611 /* Forward declarations for functions below, in the MD interface
3613 static void fix_new_arm (fragS
*, int, short, expressionS
*, int, int);
3614 static valueT
create_unwind_entry (int);
3615 static void start_unwind_section (const segT
, int);
3616 static void add_unwind_opcode (valueT
, int);
3617 static void flush_pending_unwind (void);
3619 /* Directives: Data. */
3622 s_arm_elf_cons (int nbytes
)
3626 #ifdef md_flush_pending_output
3627 md_flush_pending_output ();
3630 if (is_it_end_of_statement ())
3632 demand_empty_rest_of_line ();
3636 #ifdef md_cons_align
3637 md_cons_align (nbytes
);
3640 mapping_state (MAP_DATA
);
3644 char *base
= input_line_pointer
;
3648 if (exp
.X_op
!= O_symbol
)
3649 emit_expr (&exp
, (unsigned int) nbytes
);
3652 char *before_reloc
= input_line_pointer
;
3653 reloc
= parse_reloc (&input_line_pointer
);
3656 as_bad (_("unrecognized relocation suffix"));
3657 ignore_rest_of_line ();
3660 else if (reloc
== BFD_RELOC_UNUSED
)
3661 emit_expr (&exp
, (unsigned int) nbytes
);
3664 reloc_howto_type
*howto
= (reloc_howto_type
*)
3665 bfd_reloc_type_lookup (stdoutput
,
3666 (bfd_reloc_code_real_type
) reloc
);
3667 int size
= bfd_get_reloc_size (howto
);
3669 if (reloc
== BFD_RELOC_ARM_PLT32
)
3671 as_bad (_("(plt) is only valid on branch targets"));
3672 reloc
= BFD_RELOC_UNUSED
;
3677 as_bad (ngettext ("%s relocations do not fit in %d byte",
3678 "%s relocations do not fit in %d bytes",
3680 howto
->name
, nbytes
);
3683 /* We've parsed an expression stopping at O_symbol.
3684 But there may be more expression left now that we
3685 have parsed the relocation marker. Parse it again.
3686 XXX Surely there is a cleaner way to do this. */
3687 char *p
= input_line_pointer
;
3689 char *save_buf
= XNEWVEC (char, input_line_pointer
- base
);
3691 memcpy (save_buf
, base
, input_line_pointer
- base
);
3692 memmove (base
+ (input_line_pointer
- before_reloc
),
3693 base
, before_reloc
- base
);
3695 input_line_pointer
= base
+ (input_line_pointer
-before_reloc
);
3697 memcpy (base
, save_buf
, p
- base
);
3699 offset
= nbytes
- size
;
3700 p
= frag_more (nbytes
);
3701 memset (p
, 0, nbytes
);
3702 fix_new_exp (frag_now
, p
- frag_now
->fr_literal
+ offset
,
3703 size
, &exp
, 0, (enum bfd_reloc_code_real
) reloc
);
3709 while (*input_line_pointer
++ == ',');
3711 /* Put terminator back into stream. */
3712 input_line_pointer
--;
3713 demand_empty_rest_of_line ();
3716 /* Emit an expression containing a 32-bit thumb instruction.
3717 Implementation based on put_thumb32_insn. */
3720 emit_thumb32_expr (expressionS
* exp
)
3722 expressionS exp_high
= *exp
;
3724 exp_high
.X_add_number
= (unsigned long)exp_high
.X_add_number
>> 16;
3725 emit_expr (& exp_high
, (unsigned int) THUMB_SIZE
);
3726 exp
->X_add_number
&= 0xffff;
3727 emit_expr (exp
, (unsigned int) THUMB_SIZE
);
3730 /* Guess the instruction size based on the opcode. */
/* Guess the size (2 or 4 bytes) of a Thumb instruction from its opcode
   value; return 0 if the size cannot be determined.  Opcodes below
   0xe800 are 16-bit; values with a 32-bit encoding (>= 0xe8000000 when
   viewed as the full word) are 32-bit; the ambiguous middle range is
   rejected.  */

static int
thumb_insn_size (int opcode)
{
  if ((unsigned int) opcode < 0xe800u)
    return 2;
  else if ((unsigned int) opcode >= 0xe8000000u)
    return 4;
  else
    return 0;
}
3744 emit_insn (expressionS
*exp
, int nbytes
)
3748 if (exp
->X_op
== O_constant
)
3753 size
= thumb_insn_size (exp
->X_add_number
);
3757 if (size
== 2 && (unsigned int)exp
->X_add_number
> 0xffffu
)
3759 as_bad (_(".inst.n operand too big. "\
3760 "Use .inst.w instead"));
3765 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
3766 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN
, 0);
3768 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN
, 0);
3770 if (thumb_mode
&& (size
> THUMB_SIZE
) && !target_big_endian
)
3771 emit_thumb32_expr (exp
);
3773 emit_expr (exp
, (unsigned int) size
);
3775 it_fsm_post_encode ();
3779 as_bad (_("cannot determine Thumb instruction size. " \
3780 "Use .inst.n/.inst.w instead"));
3783 as_bad (_("constant expression required"));
3788 /* Like s_arm_elf_cons but do not use md_cons_align and
3789 set the mapping state to MAP_ARM/MAP_THUMB. */
3792 s_arm_elf_inst (int nbytes
)
3794 if (is_it_end_of_statement ())
3796 demand_empty_rest_of_line ();
3800 /* Calling mapping_state () here will not change ARM/THUMB,
3801 but will ensure not to be in DATA state. */
3804 mapping_state (MAP_THUMB
);
3809 as_bad (_("width suffixes are invalid in ARM mode"));
3810 ignore_rest_of_line ();
3816 mapping_state (MAP_ARM
);
3825 if (! emit_insn (& exp
, nbytes
))
3827 ignore_rest_of_line ();
3831 while (*input_line_pointer
++ == ',');
3833 /* Put terminator back into stream. */
3834 input_line_pointer
--;
3835 demand_empty_rest_of_line ();
3838 /* Parse a .rel31 directive. */
3841 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED
)
3848 if (*input_line_pointer
== '1')
3849 highbit
= 0x80000000;
3850 else if (*input_line_pointer
!= '0')
3851 as_bad (_("expected 0 or 1"));
3853 input_line_pointer
++;
3854 if (*input_line_pointer
!= ',')
3855 as_bad (_("missing comma"));
3856 input_line_pointer
++;
3858 #ifdef md_flush_pending_output
3859 md_flush_pending_output ();
3862 #ifdef md_cons_align
3866 mapping_state (MAP_DATA
);
3871 md_number_to_chars (p
, highbit
, 4);
3872 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 1,
3873 BFD_RELOC_ARM_PREL31
);
3875 demand_empty_rest_of_line ();
3878 /* Directives: AEABI stack-unwind tables. */
3880 /* Parse an unwind_fnstart directive. Simply records the current location. */
3883 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED
)
3885 demand_empty_rest_of_line ();
3886 if (unwind
.proc_start
)
3888 as_bad (_("duplicate .fnstart directive"));
3892 /* Mark the start of the function. */
3893 unwind
.proc_start
= expr_build_dot ();
3895 /* Reset the rest of the unwind info. */
3896 unwind
.opcode_count
= 0;
3897 unwind
.table_entry
= NULL
;
3898 unwind
.personality_routine
= NULL
;
3899 unwind
.personality_index
= -1;
3900 unwind
.frame_size
= 0;
3901 unwind
.fp_offset
= 0;
3902 unwind
.fp_reg
= REG_SP
;
3904 unwind
.sp_restored
= 0;
3908 /* Parse a handlerdata directive. Creates the exception handling table entry
3909 for the function. */
3912 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED
)
3914 demand_empty_rest_of_line ();
3915 if (!unwind
.proc_start
)
3916 as_bad (MISSING_FNSTART
);
3918 if (unwind
.table_entry
)
3919 as_bad (_("duplicate .handlerdata directive"));
3921 create_unwind_entry (1);
3924 /* Parse an unwind_fnend directive. Generates the index table entry. */
3927 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED
)
3932 unsigned int marked_pr_dependency
;
3934 demand_empty_rest_of_line ();
3936 if (!unwind
.proc_start
)
3938 as_bad (_(".fnend directive without .fnstart"));
3942 /* Add eh table entry. */
3943 if (unwind
.table_entry
== NULL
)
3944 val
= create_unwind_entry (0);
3948 /* Add index table entry. This is two words. */
3949 start_unwind_section (unwind
.saved_seg
, 1);
3950 frag_align (2, 0, 0);
3951 record_alignment (now_seg
, 2);
3953 ptr
= frag_more (8);
3955 where
= frag_now_fix () - 8;
3957 /* Self relative offset of the function start. */
3958 fix_new (frag_now
, where
, 4, unwind
.proc_start
, 0, 1,
3959 BFD_RELOC_ARM_PREL31
);
3961 /* Indicate dependency on EHABI-defined personality routines to the
3962 linker, if it hasn't been done already. */
3963 marked_pr_dependency
3964 = seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
;
3965 if (unwind
.personality_index
>= 0 && unwind
.personality_index
< 3
3966 && !(marked_pr_dependency
& (1 << unwind
.personality_index
)))
3968 static const char *const name
[] =
3970 "__aeabi_unwind_cpp_pr0",
3971 "__aeabi_unwind_cpp_pr1",
3972 "__aeabi_unwind_cpp_pr2"
3974 symbolS
*pr
= symbol_find_or_make (name
[unwind
.personality_index
]);
3975 fix_new (frag_now
, where
, 0, pr
, 0, 1, BFD_RELOC_NONE
);
3976 seg_info (now_seg
)->tc_segment_info_data
.marked_pr_dependency
3977 |= 1 << unwind
.personality_index
;
3981 /* Inline exception table entry. */
3982 md_number_to_chars (ptr
+ 4, val
, 4);
3984 /* Self relative offset of the table entry. */
3985 fix_new (frag_now
, where
+ 4, 4, unwind
.table_entry
, 0, 1,
3986 BFD_RELOC_ARM_PREL31
);
3988 /* Restore the original section. */
3989 subseg_set (unwind
.saved_seg
, unwind
.saved_subseg
);
3991 unwind
.proc_start
= NULL
;
3995 /* Parse an unwind_cantunwind directive. */
3998 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED
)
4000 demand_empty_rest_of_line ();
4001 if (!unwind
.proc_start
)
4002 as_bad (MISSING_FNSTART
);
4004 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4005 as_bad (_("personality routine specified for cantunwind frame"));
4007 unwind
.personality_index
= -2;
4011 /* Parse a personalityindex directive. */
4014 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED
)
4018 if (!unwind
.proc_start
)
4019 as_bad (MISSING_FNSTART
);
4021 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4022 as_bad (_("duplicate .personalityindex directive"));
4026 if (exp
.X_op
!= O_constant
4027 || exp
.X_add_number
< 0 || exp
.X_add_number
> 15)
4029 as_bad (_("bad personality routine number"));
4030 ignore_rest_of_line ();
4034 unwind
.personality_index
= exp
.X_add_number
;
4036 demand_empty_rest_of_line ();
4040 /* Parse a personality directive. */
4043 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED
)
4047 if (!unwind
.proc_start
)
4048 as_bad (MISSING_FNSTART
);
4050 if (unwind
.personality_routine
|| unwind
.personality_index
!= -1)
4051 as_bad (_("duplicate .personality directive"));
4053 c
= get_symbol_name (& name
);
4054 p
= input_line_pointer
;
4056 ++ input_line_pointer
;
4057 unwind
.personality_routine
= symbol_find_or_make (name
);
4059 demand_empty_rest_of_line ();
4063 /* Parse a directive saving core registers. */
4066 s_arm_unwind_save_core (void)
4072 range
= parse_reg_list (&input_line_pointer
, REGLIST_RN
);
4075 as_bad (_("expected register list"));
4076 ignore_rest_of_line ();
4080 demand_empty_rest_of_line ();
4082 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
4083 into .unwind_save {..., sp...}. We aren't bothered about the value of
4084 ip because it is clobbered by calls. */
4085 if (unwind
.sp_restored
&& unwind
.fp_reg
== 12
4086 && (range
& 0x3000) == 0x1000)
4088 unwind
.opcode_count
--;
4089 unwind
.sp_restored
= 0;
4090 range
= (range
| 0x2000) & ~0x1000;
4091 unwind
.pending_offset
= 0;
4097 /* See if we can use the short opcodes. These pop a block of up to 8
4098 registers starting with r4, plus maybe r14. */
4099 for (n
= 0; n
< 8; n
++)
4101 /* Break at the first non-saved register. */
4102 if ((range
& (1 << (n
+ 4))) == 0)
4105 /* See if there are any other bits set. */
4106 if (n
== 0 || (range
& (0xfff0 << n
) & 0xbff0) != 0)
4108 /* Use the long form. */
4109 op
= 0x8000 | ((range
>> 4) & 0xfff);
4110 add_unwind_opcode (op
, 2);
4114 /* Use the short form. */
4116 op
= 0xa8; /* Pop r14. */
4118 op
= 0xa0; /* Do not pop r14. */
4120 add_unwind_opcode (op
, 1);
4127 op
= 0xb100 | (range
& 0xf);
4128 add_unwind_opcode (op
, 2);
4131 /* Record the number of bytes pushed. */
4132 for (n
= 0; n
< 16; n
++)
4134 if (range
& (1 << n
))
4135 unwind
.frame_size
+= 4;
4140 /* Parse a directive saving FPA registers. */
4143 s_arm_unwind_save_fpa (int reg
)
4149 /* Get Number of registers to transfer. */
4150 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4153 exp
.X_op
= O_illegal
;
4155 if (exp
.X_op
!= O_constant
)
4157 as_bad (_("expected , <constant>"));
4158 ignore_rest_of_line ();
4162 num_regs
= exp
.X_add_number
;
4164 if (num_regs
< 1 || num_regs
> 4)
4166 as_bad (_("number of registers must be in the range [1:4]"));
4167 ignore_rest_of_line ();
4171 demand_empty_rest_of_line ();
4176 op
= 0xb4 | (num_regs
- 1);
4177 add_unwind_opcode (op
, 1);
4182 op
= 0xc800 | (reg
<< 4) | (num_regs
- 1);
4183 add_unwind_opcode (op
, 2);
4185 unwind
.frame_size
+= num_regs
* 12;
4189 /* Parse a directive saving VFP registers for ARMv6 and above. */
4192 s_arm_unwind_save_vfp_armv6 (void)
4197 int num_vfpv3_regs
= 0;
4198 int num_regs_below_16
;
4199 bfd_boolean partial_match
;
4201 count
= parse_vfp_reg_list (&input_line_pointer
, &start
, REGLIST_VFP_D
,
4205 as_bad (_("expected register list"));
4206 ignore_rest_of_line ();
4210 demand_empty_rest_of_line ();
4212 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4213 than FSTMX/FLDMX-style ones). */
4215 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4217 num_vfpv3_regs
= count
;
4218 else if (start
+ count
> 16)
4219 num_vfpv3_regs
= start
+ count
- 16;
4221 if (num_vfpv3_regs
> 0)
4223 int start_offset
= start
> 16 ? start
- 16 : 0;
4224 op
= 0xc800 | (start_offset
<< 4) | (num_vfpv3_regs
- 1);
4225 add_unwind_opcode (op
, 2);
4228 /* Generate opcode for registers numbered in the range 0 .. 15. */
4229 num_regs_below_16
= num_vfpv3_regs
> 0 ? 16 - (int) start
: count
;
4230 gas_assert (num_regs_below_16
+ num_vfpv3_regs
== count
);
4231 if (num_regs_below_16
> 0)
4233 op
= 0xc900 | (start
<< 4) | (num_regs_below_16
- 1);
4234 add_unwind_opcode (op
, 2);
4237 unwind
.frame_size
+= count
* 8;
4241 /* Parse a directive saving VFP registers for pre-ARMv6. */
4244 s_arm_unwind_save_vfp (void)
4249 bfd_boolean partial_match
;
4251 count
= parse_vfp_reg_list (&input_line_pointer
, ®
, REGLIST_VFP_D
,
4255 as_bad (_("expected register list"));
4256 ignore_rest_of_line ();
4260 demand_empty_rest_of_line ();
4265 op
= 0xb8 | (count
- 1);
4266 add_unwind_opcode (op
, 1);
4271 op
= 0xb300 | (reg
<< 4) | (count
- 1);
4272 add_unwind_opcode (op
, 2);
4274 unwind
.frame_size
+= count
* 8 + 4;
4278 /* Parse a directive saving iWMMXt data registers. */
4281 s_arm_unwind_save_mmxwr (void)
4289 if (*input_line_pointer
== '{')
4290 input_line_pointer
++;
4294 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4298 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4303 as_tsktsk (_("register list not in ascending order"));
4306 if (*input_line_pointer
== '-')
4308 input_line_pointer
++;
4309 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWR
);
4312 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWR
]));
4315 else if (reg
>= hi_reg
)
4317 as_bad (_("bad register range"));
4320 for (; reg
< hi_reg
; reg
++)
4324 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4326 skip_past_char (&input_line_pointer
, '}');
4328 demand_empty_rest_of_line ();
4330 /* Generate any deferred opcodes because we're going to be looking at
4332 flush_pending_unwind ();
4334 for (i
= 0; i
< 16; i
++)
4336 if (mask
& (1 << i
))
4337 unwind
.frame_size
+= 8;
4340 /* Attempt to combine with a previous opcode. We do this because gcc
4341 likes to output separate unwind directives for a single block of
4343 if (unwind
.opcode_count
> 0)
4345 i
= unwind
.opcodes
[unwind
.opcode_count
- 1];
4346 if ((i
& 0xf8) == 0xc0)
4349 /* Only merge if the blocks are contiguous. */
4352 if ((mask
& 0xfe00) == (1 << 9))
4354 mask
|= ((1 << (i
+ 11)) - 1) & 0xfc00;
4355 unwind
.opcode_count
--;
4358 else if (i
== 6 && unwind
.opcode_count
>= 2)
4360 i
= unwind
.opcodes
[unwind
.opcode_count
- 2];
4364 op
= 0xffff << (reg
- 1);
4366 && ((mask
& op
) == (1u << (reg
- 1))))
4368 op
= (1 << (reg
+ i
+ 1)) - 1;
4369 op
&= ~((1 << reg
) - 1);
4371 unwind
.opcode_count
-= 2;
4378 /* We want to generate opcodes in the order the registers have been
4379 saved, ie. descending order. */
4380 for (reg
= 15; reg
>= -1; reg
--)
4382 /* Save registers in blocks. */
4384 || !(mask
& (1 << reg
)))
4386 /* We found an unsaved reg. Generate opcodes to save the
4393 op
= 0xc0 | (hi_reg
- 10);
4394 add_unwind_opcode (op
, 1);
4399 op
= 0xc600 | ((reg
+ 1) << 4) | ((hi_reg
- reg
) - 1);
4400 add_unwind_opcode (op
, 2);
4409 ignore_rest_of_line ();
4413 s_arm_unwind_save_mmxwcg (void)
4420 if (*input_line_pointer
== '{')
4421 input_line_pointer
++;
4423 skip_whitespace (input_line_pointer
);
4427 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4431 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4437 as_tsktsk (_("register list not in ascending order"));
4440 if (*input_line_pointer
== '-')
4442 input_line_pointer
++;
4443 hi_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_MMXWCG
);
4446 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_MMXWCG
]));
4449 else if (reg
>= hi_reg
)
4451 as_bad (_("bad register range"));
4454 for (; reg
< hi_reg
; reg
++)
4458 while (skip_past_comma (&input_line_pointer
) != FAIL
);
4460 skip_past_char (&input_line_pointer
, '}');
4462 demand_empty_rest_of_line ();
4464 /* Generate any deferred opcodes because we're going to be looking at
4466 flush_pending_unwind ();
4468 for (reg
= 0; reg
< 16; reg
++)
4470 if (mask
& (1 << reg
))
4471 unwind
.frame_size
+= 4;
4474 add_unwind_opcode (op
, 2);
4477 ignore_rest_of_line ();
4481 /* Parse an unwind_save directive.
4482 If the argument is non-zero, this is a .vsave directive. */
4485 s_arm_unwind_save (int arch_v6
)
4488 struct reg_entry
*reg
;
4489 bfd_boolean had_brace
= FALSE
;
4491 if (!unwind
.proc_start
)
4492 as_bad (MISSING_FNSTART
);
4494 /* Figure out what sort of save we have. */
4495 peek
= input_line_pointer
;
4503 reg
= arm_reg_parse_multi (&peek
);
4507 as_bad (_("register expected"));
4508 ignore_rest_of_line ();
4517 as_bad (_("FPA .unwind_save does not take a register list"));
4518 ignore_rest_of_line ();
4521 input_line_pointer
= peek
;
4522 s_arm_unwind_save_fpa (reg
->number
);
4526 s_arm_unwind_save_core ();
4531 s_arm_unwind_save_vfp_armv6 ();
4533 s_arm_unwind_save_vfp ();
4536 case REG_TYPE_MMXWR
:
4537 s_arm_unwind_save_mmxwr ();
4540 case REG_TYPE_MMXWCG
:
4541 s_arm_unwind_save_mmxwcg ();
4545 as_bad (_(".unwind_save does not support this kind of register"));
4546 ignore_rest_of_line ();
4551 /* Parse an unwind_movsp directive. */
4554 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED
)
4560 if (!unwind
.proc_start
)
4561 as_bad (MISSING_FNSTART
);
4563 reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4566 as_bad ("%s", _(reg_expected_msgs
[REG_TYPE_RN
]));
4567 ignore_rest_of_line ();
4571 /* Optional constant. */
4572 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4574 if (immediate_for_directive (&offset
) == FAIL
)
4580 demand_empty_rest_of_line ();
4582 if (reg
== REG_SP
|| reg
== REG_PC
)
4584 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4588 if (unwind
.fp_reg
!= REG_SP
)
4589 as_bad (_("unexpected .unwind_movsp directive"));
4591 /* Generate opcode to restore the value. */
4593 add_unwind_opcode (op
, 1);
4595 /* Record the information for later. */
4596 unwind
.fp_reg
= reg
;
4597 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4598 unwind
.sp_restored
= 1;
4601 /* Parse an unwind_pad directive. */
4604 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED
)
4608 if (!unwind
.proc_start
)
4609 as_bad (MISSING_FNSTART
);
4611 if (immediate_for_directive (&offset
) == FAIL
)
4616 as_bad (_("stack increment must be multiple of 4"));
4617 ignore_rest_of_line ();
4621 /* Don't generate any opcodes, just record the details for later. */
4622 unwind
.frame_size
+= offset
;
4623 unwind
.pending_offset
+= offset
;
4625 demand_empty_rest_of_line ();
4628 /* Parse an unwind_setfp directive. */
4631 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED
)
4637 if (!unwind
.proc_start
)
4638 as_bad (MISSING_FNSTART
);
4640 fp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4641 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4644 sp_reg
= arm_reg_parse (&input_line_pointer
, REG_TYPE_RN
);
4646 if (fp_reg
== FAIL
|| sp_reg
== FAIL
)
4648 as_bad (_("expected <reg>, <reg>"));
4649 ignore_rest_of_line ();
4653 /* Optional constant. */
4654 if (skip_past_comma (&input_line_pointer
) != FAIL
)
4656 if (immediate_for_directive (&offset
) == FAIL
)
4662 demand_empty_rest_of_line ();
4664 if (sp_reg
!= REG_SP
&& sp_reg
!= unwind
.fp_reg
)
4666 as_bad (_("register must be either sp or set by a previous"
4667 "unwind_movsp directive"));
4671 /* Don't generate any opcodes, just record the information for later. */
4672 unwind
.fp_reg
= fp_reg
;
4674 if (sp_reg
== REG_SP
)
4675 unwind
.fp_offset
= unwind
.frame_size
- offset
;
4677 unwind
.fp_offset
-= offset
;
4680 /* Parse an unwind_raw directive. */
4683 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED
)
4686 /* This is an arbitrary limit. */
4687 unsigned char op
[16];
4690 if (!unwind
.proc_start
)
4691 as_bad (MISSING_FNSTART
);
4694 if (exp
.X_op
== O_constant
4695 && skip_past_comma (&input_line_pointer
) != FAIL
)
4697 unwind
.frame_size
+= exp
.X_add_number
;
4701 exp
.X_op
= O_illegal
;
4703 if (exp
.X_op
!= O_constant
)
4705 as_bad (_("expected <offset>, <opcode>"));
4706 ignore_rest_of_line ();
4712 /* Parse the opcode. */
4717 as_bad (_("unwind opcode too long"));
4718 ignore_rest_of_line ();
4720 if (exp
.X_op
!= O_constant
|| exp
.X_add_number
& ~0xff)
4722 as_bad (_("invalid unwind opcode"));
4723 ignore_rest_of_line ();
4726 op
[count
++] = exp
.X_add_number
;
4728 /* Parse the next byte. */
4729 if (skip_past_comma (&input_line_pointer
) == FAIL
)
4735 /* Add the opcode bytes in reverse order. */
4737 add_unwind_opcode (op
[count
], 1);
4739 demand_empty_rest_of_line ();
4743 /* Parse a .eabi_attribute directive. */
4746 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED
)
4748 int tag
= obj_elf_vendor_attribute (OBJ_ATTR_PROC
);
4750 if (tag
>= 0 && tag
< NUM_KNOWN_OBJ_ATTRIBUTES
)
4751 attributes_set_explicitly
[tag
] = 1;
4754 /* Emit a tls fix for the symbol. */
4757 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED
)
4761 #ifdef md_flush_pending_output
4762 md_flush_pending_output ();
4765 #ifdef md_cons_align
4769 /* Since we're just labelling the code, there's no need to define a
4772 p
= obstack_next_free (&frchain_now
->frch_obstack
);
4773 fix_new_arm (frag_now
, p
- frag_now
->fr_literal
, 4, &exp
, 0,
4774 thumb_mode
? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4775 : BFD_RELOC_ARM_TLS_DESCSEQ
);
4777 #endif /* OBJ_ELF */
4779 static void s_arm_arch (int);
4780 static void s_arm_object_arch (int);
4781 static void s_arm_cpu (int);
4782 static void s_arm_fpu (int);
4783 static void s_arm_arch_extension (int);
4788 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED
)
4795 if (exp
.X_op
== O_symbol
)
4796 exp
.X_op
= O_secrel
;
4798 emit_expr (&exp
, 4);
4800 while (*input_line_pointer
++ == ',');
4802 input_line_pointer
--;
4803 demand_empty_rest_of_line ();
4807 /* This table describes all the machine specific pseudo-ops the assembler
4808 has to support. The fields are:
4809 pseudo-op name without dot
4810 function to call to execute this pseudo-op
4811 Integer arg to pass to the function. */
4813 const pseudo_typeS md_pseudo_table
[] =
4815 /* Never called because '.req' does not start a line. */
4816 { "req", s_req
, 0 },
4817 /* Following two are likewise never called. */
4820 { "unreq", s_unreq
, 0 },
4821 { "bss", s_bss
, 0 },
4822 { "align", s_align_ptwo
, 2 },
4823 { "arm", s_arm
, 0 },
4824 { "thumb", s_thumb
, 0 },
4825 { "code", s_code
, 0 },
4826 { "force_thumb", s_force_thumb
, 0 },
4827 { "thumb_func", s_thumb_func
, 0 },
4828 { "thumb_set", s_thumb_set
, 0 },
4829 { "even", s_even
, 0 },
4830 { "ltorg", s_ltorg
, 0 },
4831 { "pool", s_ltorg
, 0 },
4832 { "syntax", s_syntax
, 0 },
4833 { "cpu", s_arm_cpu
, 0 },
4834 { "arch", s_arm_arch
, 0 },
4835 { "object_arch", s_arm_object_arch
, 0 },
4836 { "fpu", s_arm_fpu
, 0 },
4837 { "arch_extension", s_arm_arch_extension
, 0 },
4839 { "word", s_arm_elf_cons
, 4 },
4840 { "long", s_arm_elf_cons
, 4 },
4841 { "inst.n", s_arm_elf_inst
, 2 },
4842 { "inst.w", s_arm_elf_inst
, 4 },
4843 { "inst", s_arm_elf_inst
, 0 },
4844 { "rel31", s_arm_rel31
, 0 },
4845 { "fnstart", s_arm_unwind_fnstart
, 0 },
4846 { "fnend", s_arm_unwind_fnend
, 0 },
4847 { "cantunwind", s_arm_unwind_cantunwind
, 0 },
4848 { "personality", s_arm_unwind_personality
, 0 },
4849 { "personalityindex", s_arm_unwind_personalityindex
, 0 },
4850 { "handlerdata", s_arm_unwind_handlerdata
, 0 },
4851 { "save", s_arm_unwind_save
, 0 },
4852 { "vsave", s_arm_unwind_save
, 1 },
4853 { "movsp", s_arm_unwind_movsp
, 0 },
4854 { "pad", s_arm_unwind_pad
, 0 },
4855 { "setfp", s_arm_unwind_setfp
, 0 },
4856 { "unwind_raw", s_arm_unwind_raw
, 0 },
4857 { "eabi_attribute", s_arm_eabi_attribute
, 0 },
4858 { "tlsdescseq", s_arm_tls_descseq
, 0 },
4862 /* These are used for dwarf. */
4866 /* These are used for dwarf2. */
4867 { "file", dwarf2_directive_file
, 0 },
4868 { "loc", dwarf2_directive_loc
, 0 },
4869 { "loc_mark_labels", dwarf2_directive_loc_mark_labels
, 0 },
4871 { "extend", float_cons
, 'x' },
4872 { "ldouble", float_cons
, 'x' },
4873 { "packed", float_cons
, 'p' },
4875 {"secrel32", pe_directive_secrel
, 0},
4878 /* These are for compatibility with CodeComposer Studio. */
4879 {"ref", s_ccs_ref
, 0},
4880 {"def", s_ccs_def
, 0},
4881 {"asmfunc", s_ccs_asmfunc
, 0},
4882 {"endasmfunc", s_ccs_endasmfunc
, 0},
4887 /* Parser functions used exclusively in instruction operands. */
4889 /* Generic immediate-value read function for use in insn parsing.
4890 STR points to the beginning of the immediate (the leading #);
4891 VAL receives the value; if the value is outside [MIN, MAX]
4892 issue an error. PREFIX_OPT is true if the immediate prefix is
4896 parse_immediate (char **str
, int *val
, int min
, int max
,
4897 bfd_boolean prefix_opt
)
4901 my_get_expression (&exp
, str
, prefix_opt
? GE_OPT_PREFIX
: GE_IMM_PREFIX
);
4902 if (exp
.X_op
!= O_constant
)
4904 inst
.error
= _("constant expression required");
4908 if (exp
.X_add_number
< min
|| exp
.X_add_number
> max
)
4910 inst
.error
= _("immediate value out of range");
4914 *val
= exp
.X_add_number
;
4918 /* Less-generic immediate-value read function with the possibility of loading a
4919 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4920 instructions. Puts the result directly in inst.operands[i]. */
4923 parse_big_immediate (char **str
, int i
, expressionS
*in_exp
,
4924 bfd_boolean allow_symbol_p
)
4927 expressionS
*exp_p
= in_exp
? in_exp
: &exp
;
4930 my_get_expression (exp_p
, &ptr
, GE_OPT_PREFIX_BIG
);
4932 if (exp_p
->X_op
== O_constant
)
4934 inst
.operands
[i
].imm
= exp_p
->X_add_number
& 0xffffffff;
4935 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4936 O_constant. We have to be careful not to break compilation for
4937 32-bit X_add_number, though. */
4938 if ((exp_p
->X_add_number
& ~(offsetT
)(0xffffffffU
)) != 0)
4940 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4941 inst
.operands
[i
].reg
= (((exp_p
->X_add_number
>> 16) >> 16)
4943 inst
.operands
[i
].regisimm
= 1;
4946 else if (exp_p
->X_op
== O_big
4947 && LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 32)
4949 unsigned parts
= 32 / LITTLENUM_NUMBER_OF_BITS
, j
, idx
= 0;
4951 /* Bignums have their least significant bits in
4952 generic_bignum[0]. Make sure we put 32 bits in imm and
4953 32 bits in reg, in a (hopefully) portable way. */
4954 gas_assert (parts
!= 0);
4956 /* Make sure that the number is not too big.
4957 PR 11972: Bignums can now be sign-extended to the
4958 size of a .octa so check that the out of range bits
4959 are all zero or all one. */
4960 if (LITTLENUM_NUMBER_OF_BITS
* exp_p
->X_add_number
> 64)
4962 LITTLENUM_TYPE m
= -1;
4964 if (generic_bignum
[parts
* 2] != 0
4965 && generic_bignum
[parts
* 2] != m
)
4968 for (j
= parts
* 2 + 1; j
< (unsigned) exp_p
->X_add_number
; j
++)
4969 if (generic_bignum
[j
] != generic_bignum
[j
-1])
4973 inst
.operands
[i
].imm
= 0;
4974 for (j
= 0; j
< parts
; j
++, idx
++)
4975 inst
.operands
[i
].imm
|= generic_bignum
[idx
]
4976 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4977 inst
.operands
[i
].reg
= 0;
4978 for (j
= 0; j
< parts
; j
++, idx
++)
4979 inst
.operands
[i
].reg
|= generic_bignum
[idx
]
4980 << (LITTLENUM_NUMBER_OF_BITS
* j
);
4981 inst
.operands
[i
].regisimm
= 1;
4983 else if (!(exp_p
->X_op
== O_symbol
&& allow_symbol_p
))
4991 /* Returns the pseudo-register number of an FPA immediate constant,
4992 or FAIL if there isn't a valid constant here. */
4995 parse_fpa_immediate (char ** str
)
4997 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5003 /* First try and match exact strings, this is to guarantee
5004 that some formats will work even for cross assembly. */
5006 for (i
= 0; fp_const
[i
]; i
++)
5008 if (strncmp (*str
, fp_const
[i
], strlen (fp_const
[i
])) == 0)
5012 *str
+= strlen (fp_const
[i
]);
5013 if (is_end_of_line
[(unsigned char) **str
])
5019 /* Just because we didn't get a match doesn't mean that the constant
5020 isn't valid, just that it is in a format that we don't
5021 automatically recognize. Try parsing it with the standard
5022 expression routines. */
5024 memset (words
, 0, MAX_LITTLENUMS
* sizeof (LITTLENUM_TYPE
));
5026 /* Look for a raw floating point number. */
5027 if ((save_in
= atof_ieee (*str
, 'x', words
)) != NULL
5028 && is_end_of_line
[(unsigned char) *save_in
])
5030 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5032 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5034 if (words
[j
] != fp_values
[i
][j
])
5038 if (j
== MAX_LITTLENUMS
)
5046 /* Try and parse a more complex expression, this will probably fail
5047 unless the code uses a floating point prefix (eg "0f"). */
5048 save_in
= input_line_pointer
;
5049 input_line_pointer
= *str
;
5050 if (expression (&exp
) == absolute_section
5051 && exp
.X_op
== O_big
5052 && exp
.X_add_number
< 0)
5054 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
5056 #define X_PRECISION 5
5057 #define E_PRECISION 15L
5058 if (gen_to_words (words
, X_PRECISION
, E_PRECISION
) == 0)
5060 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
5062 for (j
= 0; j
< MAX_LITTLENUMS
; j
++)
5064 if (words
[j
] != fp_values
[i
][j
])
5068 if (j
== MAX_LITTLENUMS
)
5070 *str
= input_line_pointer
;
5071 input_line_pointer
= save_in
;
5078 *str
= input_line_pointer
;
5079 input_line_pointer
= save_in
;
5080 inst
.error
= _("invalid FPA immediate expression");
5084 /* Returns 1 if a number has "quarter-precision" float format
5085 0baBbbbbbc defgh000 00000000 00000000. */
5088 is_quarter_float (unsigned imm
)
5090 int bs
= (imm
& 0x20000000) ? 0x3e000000 : 0x40000000;
5091 return (imm
& 0x7ffff) == 0 && ((imm
& 0x7e000000) ^ bs
) == 0;
5095 /* Detect the presence of a floating point or integer zero constant,
5099 parse_ifimm_zero (char **in
)
5103 if (!is_immediate_prefix (**in
))
5105 /* In unified syntax, all prefixes are optional. */
5106 if (!unified_syntax
)
5112 /* Accept #0x0 as a synonym for #0. */
5113 if (strncmp (*in
, "0x", 2) == 0)
5116 if (parse_immediate (in
, &val
, 0, 0, TRUE
) == FAIL
)
5121 error_code
= atof_generic (in
, ".", EXP_CHARS
,
5122 &generic_floating_point_number
);
5125 && generic_floating_point_number
.sign
== '+'
5126 && (generic_floating_point_number
.low
5127 > generic_floating_point_number
.leader
))
5133 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5134 0baBbbbbbc defgh000 00000000 00000000.
5135 The zero and minus-zero cases need special handling, since they can't be
5136 encoded in the "quarter-precision" float format, but can nonetheless be
5137 loaded as integer constants. */
5140 parse_qfloat_immediate (char **ccp
, int *immed
)
5144 LITTLENUM_TYPE words
[MAX_LITTLENUMS
];
5145 int found_fpchar
= 0;
5147 skip_past_char (&str
, '#');
5149 /* We must not accidentally parse an integer as a floating-point number. Make
5150 sure that the value we parse is not an integer by checking for special
5151 characters '.' or 'e'.
5152 FIXME: This is a horrible hack, but doing better is tricky because type
5153 information isn't in a very usable state at parse time. */
5155 skip_whitespace (fpnum
);
5157 if (strncmp (fpnum
, "0x", 2) == 0)
5161 for (; *fpnum
!= '\0' && *fpnum
!= ' ' && *fpnum
!= '\n'; fpnum
++)
5162 if (*fpnum
== '.' || *fpnum
== 'e' || *fpnum
== 'E')
5172 if ((str
= atof_ieee (str
, 's', words
)) != NULL
)
5174 unsigned fpword
= 0;
5177 /* Our FP word must be 32 bits (single-precision FP). */
5178 for (i
= 0; i
< 32 / LITTLENUM_NUMBER_OF_BITS
; i
++)
5180 fpword
<<= LITTLENUM_NUMBER_OF_BITS
;
5184 if (is_quarter_float (fpword
) || (fpword
& 0x7fffffff) == 0)
5197 /* Shift operands. */
5200 SHIFT_LSL
, SHIFT_LSR
, SHIFT_ASR
, SHIFT_ROR
, SHIFT_RRX
5203 struct asm_shift_name
5206 enum shift_kind kind
;
5209 /* Third argument to parse_shift. */
5210 enum parse_shift_mode
5212 NO_SHIFT_RESTRICT
, /* Any kind of shift is accepted. */
5213 SHIFT_IMMEDIATE
, /* Shift operand must be an immediate. */
5214 SHIFT_LSL_OR_ASR_IMMEDIATE
, /* Shift must be LSL or ASR immediate. */
5215 SHIFT_ASR_IMMEDIATE
, /* Shift must be ASR immediate. */
5216 SHIFT_LSL_IMMEDIATE
, /* Shift must be LSL immediate. */
5219 /* Parse a <shift> specifier on an ARM data processing instruction.
5220 This has three forms:
5222 (LSL|LSR|ASL|ASR|ROR) Rs
5223 (LSL|LSR|ASL|ASR|ROR) #imm
5226 Note that ASL is assimilated to LSL in the instruction encoding, and
5227 RRX to ROR #0 (which cannot be written as such). */
5230 parse_shift (char **str
, int i
, enum parse_shift_mode mode
)
5232 const struct asm_shift_name
*shift_name
;
5233 enum shift_kind shift
;
5238 for (p
= *str
; ISALPHA (*p
); p
++)
5243 inst
.error
= _("shift expression expected");
5247 shift_name
= (const struct asm_shift_name
*) hash_find_n (arm_shift_hsh
, *str
,
5250 if (shift_name
== NULL
)
5252 inst
.error
= _("shift expression expected");
5256 shift
= shift_name
->kind
;
5260 case NO_SHIFT_RESTRICT
:
5261 case SHIFT_IMMEDIATE
: break;
5263 case SHIFT_LSL_OR_ASR_IMMEDIATE
:
5264 if (shift
!= SHIFT_LSL
&& shift
!= SHIFT_ASR
)
5266 inst
.error
= _("'LSL' or 'ASR' required");
5271 case SHIFT_LSL_IMMEDIATE
:
5272 if (shift
!= SHIFT_LSL
)
5274 inst
.error
= _("'LSL' required");
5279 case SHIFT_ASR_IMMEDIATE
:
5280 if (shift
!= SHIFT_ASR
)
5282 inst
.error
= _("'ASR' required");
5290 if (shift
!= SHIFT_RRX
)
5292 /* Whitespace can appear here if the next thing is a bare digit. */
5293 skip_whitespace (p
);
5295 if (mode
== NO_SHIFT_RESTRICT
5296 && (reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5298 inst
.operands
[i
].imm
= reg
;
5299 inst
.operands
[i
].immisreg
= 1;
5301 else if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5304 inst
.operands
[i
].shift_kind
= shift
;
5305 inst
.operands
[i
].shifted
= 1;
5310 /* Parse a <shifter_operand> for an ARM data processing instruction:
5313 #<immediate>, <rotate>
5317 where <shift> is defined by parse_shift above, and <rotate> is a
5318 multiple of 2 between 0 and 30. Validation of immediate operands
5319 is deferred to md_apply_fix. */
5322 parse_shifter_operand (char **str
, int i
)
5327 if ((value
= arm_reg_parse (str
, REG_TYPE_RN
)) != FAIL
)
5329 inst
.operands
[i
].reg
= value
;
5330 inst
.operands
[i
].isreg
= 1;
5332 /* parse_shift will override this if appropriate */
5333 inst
.relocs
[0].exp
.X_op
= O_constant
;
5334 inst
.relocs
[0].exp
.X_add_number
= 0;
5336 if (skip_past_comma (str
) == FAIL
)
5339 /* Shift operation on register. */
5340 return parse_shift (str
, i
, NO_SHIFT_RESTRICT
);
5343 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_IMM_PREFIX
))
5346 if (skip_past_comma (str
) == SUCCESS
)
5348 /* #x, y -- ie explicit rotation by Y. */
5349 if (my_get_expression (&exp
, str
, GE_NO_PREFIX
))
5352 if (exp
.X_op
!= O_constant
|| inst
.relocs
[0].exp
.X_op
!= O_constant
)
5354 inst
.error
= _("constant expression expected");
5358 value
= exp
.X_add_number
;
5359 if (value
< 0 || value
> 30 || value
% 2 != 0)
5361 inst
.error
= _("invalid rotation");
5364 if (inst
.relocs
[0].exp
.X_add_number
< 0
5365 || inst
.relocs
[0].exp
.X_add_number
> 255)
5367 inst
.error
= _("invalid constant");
5371 /* Encode as specified. */
5372 inst
.operands
[i
].imm
= inst
.relocs
[0].exp
.X_add_number
| value
<< 7;
5376 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
5377 inst
.relocs
[0].pc_rel
= 0;
5381 /* Group relocation information. Each entry in the table contains the
5382 textual name of the relocation as may appear in assembler source
5383 and must end with a colon.
5384 Along with this textual name are the relocation codes to be used if
5385 the corresponding instruction is an ALU instruction (ADD or SUB only),
5386 an LDR, an LDRS, or an LDC. */
5388 struct group_reloc_table_entry
5399 /* Varieties of non-ALU group relocation. */
5406 static struct group_reloc_table_entry group_reloc_table
[] =
5407 { /* Program counter relative: */
5409 BFD_RELOC_ARM_ALU_PC_G0_NC
, /* ALU */
5414 BFD_RELOC_ARM_ALU_PC_G0
, /* ALU */
5415 BFD_RELOC_ARM_LDR_PC_G0
, /* LDR */
5416 BFD_RELOC_ARM_LDRS_PC_G0
, /* LDRS */
5417 BFD_RELOC_ARM_LDC_PC_G0
}, /* LDC */
5419 BFD_RELOC_ARM_ALU_PC_G1_NC
, /* ALU */
5424 BFD_RELOC_ARM_ALU_PC_G1
, /* ALU */
5425 BFD_RELOC_ARM_LDR_PC_G1
, /* LDR */
5426 BFD_RELOC_ARM_LDRS_PC_G1
, /* LDRS */
5427 BFD_RELOC_ARM_LDC_PC_G1
}, /* LDC */
5429 BFD_RELOC_ARM_ALU_PC_G2
, /* ALU */
5430 BFD_RELOC_ARM_LDR_PC_G2
, /* LDR */
5431 BFD_RELOC_ARM_LDRS_PC_G2
, /* LDRS */
5432 BFD_RELOC_ARM_LDC_PC_G2
}, /* LDC */
5433 /* Section base relative */
5435 BFD_RELOC_ARM_ALU_SB_G0_NC
, /* ALU */
5440 BFD_RELOC_ARM_ALU_SB_G0
, /* ALU */
5441 BFD_RELOC_ARM_LDR_SB_G0
, /* LDR */
5442 BFD_RELOC_ARM_LDRS_SB_G0
, /* LDRS */
5443 BFD_RELOC_ARM_LDC_SB_G0
}, /* LDC */
5445 BFD_RELOC_ARM_ALU_SB_G1_NC
, /* ALU */
5450 BFD_RELOC_ARM_ALU_SB_G1
, /* ALU */
5451 BFD_RELOC_ARM_LDR_SB_G1
, /* LDR */
5452 BFD_RELOC_ARM_LDRS_SB_G1
, /* LDRS */
5453 BFD_RELOC_ARM_LDC_SB_G1
}, /* LDC */
5455 BFD_RELOC_ARM_ALU_SB_G2
, /* ALU */
5456 BFD_RELOC_ARM_LDR_SB_G2
, /* LDR */
5457 BFD_RELOC_ARM_LDRS_SB_G2
, /* LDRS */
5458 BFD_RELOC_ARM_LDC_SB_G2
}, /* LDC */
5459 /* Absolute thumb alu relocations. */
5461 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
,/* ALU. */
5466 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
,/* ALU. */
5471 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
,/* ALU. */
5476 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,/* ALU. */
5481 /* Given the address of a pointer pointing to the textual name of a group
5482 relocation as may appear in assembler source, attempt to find its details
5483 in group_reloc_table. The pointer will be updated to the character after
5484 the trailing colon. On failure, FAIL will be returned; SUCCESS
5485 otherwise. On success, *entry will be updated to point at the relevant
5486 group_reloc_table entry. */
5489 find_group_reloc_table_entry (char **str
, struct group_reloc_table_entry
**out
)
5492 for (i
= 0; i
< ARRAY_SIZE (group_reloc_table
); i
++)
5494 int length
= strlen (group_reloc_table
[i
].name
);
5496 if (strncasecmp (group_reloc_table
[i
].name
, *str
, length
) == 0
5497 && (*str
)[length
] == ':')
5499 *out
= &group_reloc_table
[i
];
5500 *str
+= (length
+ 1);
5508 /* Parse a <shifter_operand> for an ARM data processing instruction
5509 (as for parse_shifter_operand) where group relocations are allowed:
5512 #<immediate>, <rotate>
5513 #:<group_reloc>:<expression>
5517 where <group_reloc> is one of the strings defined in group_reloc_table.
5518 The hashes are optional.
5520 Everything else is as for parse_shifter_operand. */
5522 static parse_operand_result
5523 parse_shifter_operand_group_reloc (char **str
, int i
)
5525 /* Determine if we have the sequence of characters #: or just :
5526 coming next. If we do, then we check for a group relocation.
5527 If we don't, punt the whole lot to parse_shifter_operand. */
5529 if (((*str
)[0] == '#' && (*str
)[1] == ':')
5530 || (*str
)[0] == ':')
5532 struct group_reloc_table_entry
*entry
;
5534 if ((*str
)[0] == '#')
5539 /* Try to parse a group relocation. Anything else is an error. */
5540 if (find_group_reloc_table_entry (str
, &entry
) == FAIL
)
5542 inst
.error
= _("unknown group relocation");
5543 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5546 /* We now have the group relocation table entry corresponding to
5547 the name in the assembler source. Next, we parse the expression. */
5548 if (my_get_expression (&inst
.relocs
[0].exp
, str
, GE_NO_PREFIX
))
5549 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5551 /* Record the relocation type (always the ALU variant here). */
5552 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) entry
->alu_code
;
5553 gas_assert (inst
.relocs
[0].type
!= 0);
5555 return PARSE_OPERAND_SUCCESS
;
5558 return parse_shifter_operand (str
, i
) == SUCCESS
5559 ? PARSE_OPERAND_SUCCESS
: PARSE_OPERAND_FAIL
;
5561 /* Never reached. */
5564 /* Parse a Neon alignment expression. Information is written to
5565 inst.operands[i]. We assume the initial ':' has been skipped.
5567 align .imm = align << 8, .immisalign=1, .preind=0 */
5568 static parse_operand_result
5569 parse_neon_alignment (char **str
, int i
)
5574 my_get_expression (&exp
, &p
, GE_NO_PREFIX
);
5576 if (exp
.X_op
!= O_constant
)
5578 inst
.error
= _("alignment must be constant");
5579 return PARSE_OPERAND_FAIL
;
5582 inst
.operands
[i
].imm
= exp
.X_add_number
<< 8;
5583 inst
.operands
[i
].immisalign
= 1;
5584 /* Alignments are not pre-indexes. */
5585 inst
.operands
[i
].preind
= 0;
5588 return PARSE_OPERAND_SUCCESS
;
5591 /* Parse all forms of an ARM address expression. Information is written
5592 to inst.operands[i] and/or inst.relocs[0].
5594 Preindexed addressing (.preind=1):
5596 [Rn, #offset] .reg=Rn .relocs[0].exp=offset
5597 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5598 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5599 .shift_kind=shift .relocs[0].exp=shift_imm
5601 These three may have a trailing ! which causes .writeback to be set also.
5603 Postindexed addressing (.postind=1, .writeback=1):
5605 [Rn], #offset .reg=Rn .relocs[0].exp=offset
5606 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5607 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5608 .shift_kind=shift .relocs[0].exp=shift_imm
5610 Unindexed addressing (.preind=0, .postind=0):
5612 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5616 [Rn]{!} shorthand for [Rn,#0]{!}
5617 =immediate .isreg=0 .relocs[0].exp=immediate
5618 label .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5620 It is the caller's responsibility to check for addressing modes not
5621 supported by the instruction, and to set inst.relocs[0].type. */
5623 static parse_operand_result
5624 parse_address_main (char **str
, int i
, int group_relocations
,
5625 group_reloc_type group_type
)
5630 if (skip_past_char (&p
, '[') == FAIL
)
5632 if (skip_past_char (&p
, '=') == FAIL
)
5634 /* Bare address - translate to PC-relative offset. */
5635 inst
.relocs
[0].pc_rel
= 1;
5636 inst
.operands
[i
].reg
= REG_PC
;
5637 inst
.operands
[i
].isreg
= 1;
5638 inst
.operands
[i
].preind
= 1;
5640 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_OPT_PREFIX_BIG
))
5641 return PARSE_OPERAND_FAIL
;
5643 else if (parse_big_immediate (&p
, i
, &inst
.relocs
[0].exp
,
5644 /*allow_symbol_p=*/TRUE
))
5645 return PARSE_OPERAND_FAIL
;
5648 return PARSE_OPERAND_SUCCESS
;
5651 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5652 skip_whitespace (p
);
5654 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
5656 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
5657 return PARSE_OPERAND_FAIL
;
5659 inst
.operands
[i
].reg
= reg
;
5660 inst
.operands
[i
].isreg
= 1;
5662 if (skip_past_comma (&p
) == SUCCESS
)
5664 inst
.operands
[i
].preind
= 1;
5667 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5669 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5671 inst
.operands
[i
].imm
= reg
;
5672 inst
.operands
[i
].immisreg
= 1;
5674 if (skip_past_comma (&p
) == SUCCESS
)
5675 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5676 return PARSE_OPERAND_FAIL
;
5678 else if (skip_past_char (&p
, ':') == SUCCESS
)
5680 /* FIXME: '@' should be used here, but it's filtered out by generic
5681 code before we get to see it here. This may be subject to
5683 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5685 if (result
!= PARSE_OPERAND_SUCCESS
)
5690 if (inst
.operands
[i
].negative
)
5692 inst
.operands
[i
].negative
= 0;
5696 if (group_relocations
5697 && ((*p
== '#' && *(p
+ 1) == ':') || *p
== ':'))
5699 struct group_reloc_table_entry
*entry
;
5701 /* Skip over the #: or : sequence. */
5707 /* Try to parse a group relocation. Anything else is an
5709 if (find_group_reloc_table_entry (&p
, &entry
) == FAIL
)
5711 inst
.error
= _("unknown group relocation");
5712 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5715 /* We now have the group relocation table entry corresponding to
5716 the name in the assembler source. Next, we parse the
5718 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5719 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5721 /* Record the relocation type. */
5726 = (bfd_reloc_code_real_type
) entry
->ldr_code
;
5731 = (bfd_reloc_code_real_type
) entry
->ldrs_code
;
5736 = (bfd_reloc_code_real_type
) entry
->ldc_code
;
5743 if (inst
.relocs
[0].type
== 0)
5745 inst
.error
= _("this group relocation is not allowed on this instruction");
5746 return PARSE_OPERAND_FAIL_NO_BACKTRACK
;
5753 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5754 return PARSE_OPERAND_FAIL
;
5755 /* If the offset is 0, find out if it's a +0 or -0. */
5756 if (inst
.relocs
[0].exp
.X_op
== O_constant
5757 && inst
.relocs
[0].exp
.X_add_number
== 0)
5759 skip_whitespace (q
);
5763 skip_whitespace (q
);
5766 inst
.operands
[i
].negative
= 1;
5771 else if (skip_past_char (&p
, ':') == SUCCESS
)
5773 /* FIXME: '@' should be used here, but it's filtered out by generic code
5774 before we get to see it here. This may be subject to change. */
5775 parse_operand_result result
= parse_neon_alignment (&p
, i
);
5777 if (result
!= PARSE_OPERAND_SUCCESS
)
5781 if (skip_past_char (&p
, ']') == FAIL
)
5783 inst
.error
= _("']' expected");
5784 return PARSE_OPERAND_FAIL
;
5787 if (skip_past_char (&p
, '!') == SUCCESS
)
5788 inst
.operands
[i
].writeback
= 1;
5790 else if (skip_past_comma (&p
) == SUCCESS
)
5792 if (skip_past_char (&p
, '{') == SUCCESS
)
5794 /* [Rn], {expr} - unindexed, with option */
5795 if (parse_immediate (&p
, &inst
.operands
[i
].imm
,
5796 0, 255, TRUE
) == FAIL
)
5797 return PARSE_OPERAND_FAIL
;
5799 if (skip_past_char (&p
, '}') == FAIL
)
5801 inst
.error
= _("'}' expected at end of 'option' field");
5802 return PARSE_OPERAND_FAIL
;
5804 if (inst
.operands
[i
].preind
)
5806 inst
.error
= _("cannot combine index with option");
5807 return PARSE_OPERAND_FAIL
;
5810 return PARSE_OPERAND_SUCCESS
;
5814 inst
.operands
[i
].postind
= 1;
5815 inst
.operands
[i
].writeback
= 1;
5817 if (inst
.operands
[i
].preind
)
5819 inst
.error
= _("cannot combine pre- and post-indexing");
5820 return PARSE_OPERAND_FAIL
;
5824 else if (*p
== '-') p
++, inst
.operands
[i
].negative
= 1;
5826 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) != FAIL
)
5828 /* We might be using the immediate for alignment already. If we
5829 are, OR the register number into the low-order bits. */
5830 if (inst
.operands
[i
].immisalign
)
5831 inst
.operands
[i
].imm
|= reg
;
5833 inst
.operands
[i
].imm
= reg
;
5834 inst
.operands
[i
].immisreg
= 1;
5836 if (skip_past_comma (&p
) == SUCCESS
)
5837 if (parse_shift (&p
, i
, SHIFT_IMMEDIATE
) == FAIL
)
5838 return PARSE_OPERAND_FAIL
;
5844 if (inst
.operands
[i
].negative
)
5846 inst
.operands
[i
].negative
= 0;
5849 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_IMM_PREFIX
))
5850 return PARSE_OPERAND_FAIL
;
5851 /* If the offset is 0, find out if it's a +0 or -0. */
5852 if (inst
.relocs
[0].exp
.X_op
== O_constant
5853 && inst
.relocs
[0].exp
.X_add_number
== 0)
5855 skip_whitespace (q
);
5859 skip_whitespace (q
);
5862 inst
.operands
[i
].negative
= 1;
5868 /* If at this point neither .preind nor .postind is set, we have a
5869 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5870 if (inst
.operands
[i
].preind
== 0 && inst
.operands
[i
].postind
== 0)
5872 inst
.operands
[i
].preind
= 1;
5873 inst
.relocs
[0].exp
.X_op
= O_constant
;
5874 inst
.relocs
[0].exp
.X_add_number
= 0;
5877 return PARSE_OPERAND_SUCCESS
;
5881 parse_address (char **str
, int i
)
5883 return parse_address_main (str
, i
, 0, GROUP_LDR
) == PARSE_OPERAND_SUCCESS
5887 static parse_operand_result
5888 parse_address_group_reloc (char **str
, int i
, group_reloc_type type
)
5890 return parse_address_main (str
, i
, 1, type
);
5893 /* Parse an operand for a MOVW or MOVT instruction. */
5895 parse_half (char **str
)
5900 skip_past_char (&p
, '#');
5901 if (strncasecmp (p
, ":lower16:", 9) == 0)
5902 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVW
;
5903 else if (strncasecmp (p
, ":upper16:", 9) == 0)
5904 inst
.relocs
[0].type
= BFD_RELOC_ARM_MOVT
;
5906 if (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
)
5909 skip_whitespace (p
);
5912 if (my_get_expression (&inst
.relocs
[0].exp
, &p
, GE_NO_PREFIX
))
5915 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
5917 if (inst
.relocs
[0].exp
.X_op
!= O_constant
)
5919 inst
.error
= _("constant expression expected");
5922 if (inst
.relocs
[0].exp
.X_add_number
< 0
5923 || inst
.relocs
[0].exp
.X_add_number
> 0xffff)
5925 inst
.error
= _("immediate value out of range");
5933 /* Miscellaneous. */
5935 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5936 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5938 parse_psr (char **str
, bfd_boolean lhs
)
5941 unsigned long psr_field
;
5942 const struct asm_psr
*psr
;
5944 bfd_boolean is_apsr
= FALSE
;
5945 bfd_boolean m_profile
= ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
);
5947 /* PR gas/12698: If the user has specified -march=all then m_profile will
5948 be TRUE, but we want to ignore it in this case as we are building for any
5949 CPU type, including non-m variants. */
5950 if (ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
))
5953 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5954 feature for ease of use and backwards compatibility. */
5956 if (strncasecmp (p
, "SPSR", 4) == 0)
5959 goto unsupported_psr
;
5961 psr_field
= SPSR_BIT
;
5963 else if (strncasecmp (p
, "CPSR", 4) == 0)
5966 goto unsupported_psr
;
5970 else if (strncasecmp (p
, "APSR", 4) == 0)
5972 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5973 and ARMv7-R architecture CPUs. */
5982 while (ISALNUM (*p
) || *p
== '_');
5984 if (strncasecmp (start
, "iapsr", 5) == 0
5985 || strncasecmp (start
, "eapsr", 5) == 0
5986 || strncasecmp (start
, "xpsr", 4) == 0
5987 || strncasecmp (start
, "psr", 3) == 0)
5988 p
= start
+ strcspn (start
, "rR") + 1;
5990 psr
= (const struct asm_psr
*) hash_find_n (arm_v7m_psr_hsh
, start
,
5996 /* If APSR is being written, a bitfield may be specified. Note that
5997 APSR itself is handled above. */
5998 if (psr
->field
<= 3)
6000 psr_field
= psr
->field
;
6006 /* M-profile MSR instructions have the mask field set to "10", except
6007 *PSR variants which modify APSR, which may use a different mask (and
6008 have been handled already). Do that by setting the PSR_f field
6010 return psr
->field
| (lhs
? PSR_f
: 0);
6013 goto unsupported_psr
;
6019 /* A suffix follows. */
6025 while (ISALNUM (*p
) || *p
== '_');
6029 /* APSR uses a notation for bits, rather than fields. */
6030 unsigned int nzcvq_bits
= 0;
6031 unsigned int g_bit
= 0;
6034 for (bit
= start
; bit
!= p
; bit
++)
6036 switch (TOLOWER (*bit
))
6039 nzcvq_bits
|= (nzcvq_bits
& 0x01) ? 0x20 : 0x01;
6043 nzcvq_bits
|= (nzcvq_bits
& 0x02) ? 0x20 : 0x02;
6047 nzcvq_bits
|= (nzcvq_bits
& 0x04) ? 0x20 : 0x04;
6051 nzcvq_bits
|= (nzcvq_bits
& 0x08) ? 0x20 : 0x08;
6055 nzcvq_bits
|= (nzcvq_bits
& 0x10) ? 0x20 : 0x10;
6059 g_bit
|= (g_bit
& 0x1) ? 0x2 : 0x1;
6063 inst
.error
= _("unexpected bit specified after APSR");
6068 if (nzcvq_bits
== 0x1f)
6073 if (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
))
6075 inst
.error
= _("selected processor does not "
6076 "support DSP extension");
6083 if ((nzcvq_bits
& 0x20) != 0
6084 || (nzcvq_bits
!= 0x1f && nzcvq_bits
!= 0)
6085 || (g_bit
& 0x2) != 0)
6087 inst
.error
= _("bad bitmask specified after APSR");
6093 psr
= (const struct asm_psr
*) hash_find_n (arm_psr_hsh
, start
,
6098 psr_field
|= psr
->field
;
6104 goto error
; /* Garbage after "[CS]PSR". */
6106 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6107 is deprecated, but allow it anyway. */
6111 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6114 else if (!m_profile
)
6115 /* These bits are never right for M-profile devices: don't set them
6116 (only code paths which read/write APSR reach here). */
6117 psr_field
|= (PSR_c
| PSR_f
);
6123 inst
.error
= _("selected processor does not support requested special "
6124 "purpose register");
6128 inst
.error
= _("flag for {c}psr instruction expected");
6133 parse_sys_vldr_vstr (char **str
)
6142 {"FPSCR", 0x1, 0x0},
6143 {"FPSCR_nzcvqc", 0x2, 0x0},
6146 {"FPCXTNS", 0x6, 0x1},
6147 {"FPCXTS", 0x7, 0x1}
6149 char *op_end
= strchr (*str
, ',');
6150 size_t op_strlen
= op_end
- *str
;
6152 for (i
= 0; i
< sizeof (sysregs
) / sizeof (sysregs
[0]); i
++)
6154 if (!strncmp (*str
, sysregs
[i
].name
, op_strlen
))
6156 val
= sysregs
[i
].regl
| (sysregs
[i
].regh
<< 3);
6165 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6166 value suitable for splatting into the AIF field of the instruction. */
6169 parse_cps_flags (char **str
)
6178 case '\0': case ',':
6181 case 'a': case 'A': saw_a_flag
= 1; val
|= 0x4; break;
6182 case 'i': case 'I': saw_a_flag
= 1; val
|= 0x2; break;
6183 case 'f': case 'F': saw_a_flag
= 1; val
|= 0x1; break;
6186 inst
.error
= _("unrecognized CPS flag");
6191 if (saw_a_flag
== 0)
6193 inst
.error
= _("missing CPS flags");
6201 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6202 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6205 parse_endian_specifier (char **str
)
6210 if (strncasecmp (s
, "BE", 2))
6212 else if (strncasecmp (s
, "LE", 2))
6216 inst
.error
= _("valid endian specifiers are be or le");
6220 if (ISALNUM (s
[2]) || s
[2] == '_')
6222 inst
.error
= _("valid endian specifiers are be or le");
6227 return little_endian
;
6230 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6231 value suitable for poking into the rotate field of an sxt or sxta
6232 instruction, or FAIL on error. */
6235 parse_ror (char **str
)
6240 if (strncasecmp (s
, "ROR", 3) == 0)
6244 inst
.error
= _("missing rotation field after comma");
6248 if (parse_immediate (&s
, &rot
, 0, 24, FALSE
) == FAIL
)
6253 case 0: *str
= s
; return 0x0;
6254 case 8: *str
= s
; return 0x1;
6255 case 16: *str
= s
; return 0x2;
6256 case 24: *str
= s
; return 0x3;
6259 inst
.error
= _("rotation can only be 0, 8, 16, or 24");
6264 /* Parse a conditional code (from conds[] below). The value returned is in the
6265 range 0 .. 14, or FAIL. */
6267 parse_cond (char **str
)
6270 const struct asm_cond
*c
;
6272 /* Condition codes are always 2 characters, so matching up to
6273 3 characters is sufficient. */
6278 while (ISALPHA (*q
) && n
< 3)
6280 cond
[n
] = TOLOWER (*q
);
6285 c
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, cond
, n
);
6288 inst
.error
= _("condition required");
6296 /* Record a use of the given feature. */
6298 record_feature_use (const arm_feature_set
*feature
)
6301 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
, *feature
);
6303 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, *feature
);
6306 /* If the given feature is currently allowed, mark it as used and return TRUE.
6307 Return FALSE otherwise. */
6309 mark_feature_used (const arm_feature_set
*feature
)
6311 /* Ensure the option is currently allowed. */
6312 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
6315 /* Add the appropriate architecture feature for the barrier option used. */
6316 record_feature_use (feature
);
6321 /* Parse an option for a barrier instruction. Returns the encoding for the
6324 parse_barrier (char **str
)
6327 const struct asm_barrier_opt
*o
;
6330 while (ISALPHA (*q
))
6333 o
= (const struct asm_barrier_opt
*) hash_find_n (arm_barrier_opt_hsh
, p
,
6338 if (!mark_feature_used (&o
->arch
))
6345 /* Parse the operands of a table branch instruction. Similar to a memory
6348 parse_tb (char **str
)
6353 if (skip_past_char (&p
, '[') == FAIL
)
6355 inst
.error
= _("'[' expected");
6359 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6361 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6364 inst
.operands
[0].reg
= reg
;
6366 if (skip_past_comma (&p
) == FAIL
)
6368 inst
.error
= _("',' expected");
6372 if ((reg
= arm_reg_parse (&p
, REG_TYPE_RN
)) == FAIL
)
6374 inst
.error
= _(reg_expected_msgs
[REG_TYPE_RN
]);
6377 inst
.operands
[0].imm
= reg
;
6379 if (skip_past_comma (&p
) == SUCCESS
)
6381 if (parse_shift (&p
, 0, SHIFT_LSL_IMMEDIATE
) == FAIL
)
6383 if (inst
.relocs
[0].exp
.X_add_number
!= 1)
6385 inst
.error
= _("invalid shift");
6388 inst
.operands
[0].shifted
= 1;
6391 if (skip_past_char (&p
, ']') == FAIL
)
6393 inst
.error
= _("']' expected");
6400 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6401 information on the types the operands can take and how they are encoded.
6402 Up to four operands may be read; this function handles setting the
6403 ".present" field for each read operand itself.
6404 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6405 else returns FAIL. */
6408 parse_neon_mov (char **str
, int *which_operand
)
6410 int i
= *which_operand
, val
;
6411 enum arm_reg_type rtype
;
6413 struct neon_type_el optype
;
6415 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6417 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6418 inst
.operands
[i
].reg
= val
;
6419 inst
.operands
[i
].isscalar
= 1;
6420 inst
.operands
[i
].vectype
= optype
;
6421 inst
.operands
[i
++].present
= 1;
6423 if (skip_past_comma (&ptr
) == FAIL
)
6426 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6429 inst
.operands
[i
].reg
= val
;
6430 inst
.operands
[i
].isreg
= 1;
6431 inst
.operands
[i
].present
= 1;
6433 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
, &optype
))
6436 /* Cases 0, 1, 2, 3, 5 (D only). */
6437 if (skip_past_comma (&ptr
) == FAIL
)
6440 inst
.operands
[i
].reg
= val
;
6441 inst
.operands
[i
].isreg
= 1;
6442 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6443 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6444 inst
.operands
[i
].isvec
= 1;
6445 inst
.operands
[i
].vectype
= optype
;
6446 inst
.operands
[i
++].present
= 1;
6448 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6450 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6451 Case 13: VMOV <Sd>, <Rm> */
6452 inst
.operands
[i
].reg
= val
;
6453 inst
.operands
[i
].isreg
= 1;
6454 inst
.operands
[i
].present
= 1;
6456 if (rtype
== REG_TYPE_NQ
)
6458 first_error (_("can't use Neon quad register here"));
6461 else if (rtype
!= REG_TYPE_VFS
)
6464 if (skip_past_comma (&ptr
) == FAIL
)
6466 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6468 inst
.operands
[i
].reg
= val
;
6469 inst
.operands
[i
].isreg
= 1;
6470 inst
.operands
[i
].present
= 1;
6473 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_NSDQ
, &rtype
,
6476 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6477 Case 1: VMOV<c><q> <Dd>, <Dm>
6478 Case 8: VMOV.F32 <Sd>, <Sm>
6479 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6481 inst
.operands
[i
].reg
= val
;
6482 inst
.operands
[i
].isreg
= 1;
6483 inst
.operands
[i
].isquad
= (rtype
== REG_TYPE_NQ
);
6484 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6485 inst
.operands
[i
].isvec
= 1;
6486 inst
.operands
[i
].vectype
= optype
;
6487 inst
.operands
[i
].present
= 1;
6489 if (skip_past_comma (&ptr
) == SUCCESS
)
6494 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6497 inst
.operands
[i
].reg
= val
;
6498 inst
.operands
[i
].isreg
= 1;
6499 inst
.operands
[i
++].present
= 1;
6501 if (skip_past_comma (&ptr
) == FAIL
)
6504 if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) == FAIL
)
6507 inst
.operands
[i
].reg
= val
;
6508 inst
.operands
[i
].isreg
= 1;
6509 inst
.operands
[i
].present
= 1;
6512 else if (parse_qfloat_immediate (&ptr
, &inst
.operands
[i
].imm
) == SUCCESS
)
6513 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6514 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6515 Case 10: VMOV.F32 <Sd>, #<imm>
6516 Case 11: VMOV.F64 <Dd>, #<imm> */
6517 inst
.operands
[i
].immisfloat
= 1;
6518 else if (parse_big_immediate (&ptr
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
6520 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6521 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6525 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6529 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6532 inst
.operands
[i
].reg
= val
;
6533 inst
.operands
[i
].isreg
= 1;
6534 inst
.operands
[i
++].present
= 1;
6536 if (skip_past_comma (&ptr
) == FAIL
)
6539 if ((val
= parse_scalar (&ptr
, 8, &optype
)) != FAIL
)
6541 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6542 inst
.operands
[i
].reg
= val
;
6543 inst
.operands
[i
].isscalar
= 1;
6544 inst
.operands
[i
].present
= 1;
6545 inst
.operands
[i
].vectype
= optype
;
6547 else if ((val
= arm_reg_parse (&ptr
, REG_TYPE_RN
)) != FAIL
)
6549 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6550 inst
.operands
[i
].reg
= val
;
6551 inst
.operands
[i
].isreg
= 1;
6552 inst
.operands
[i
++].present
= 1;
6554 if (skip_past_comma (&ptr
) == FAIL
)
6557 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFSD
, &rtype
, &optype
))
6560 first_error (_(reg_expected_msgs
[REG_TYPE_VFSD
]));
6564 inst
.operands
[i
].reg
= val
;
6565 inst
.operands
[i
].isreg
= 1;
6566 inst
.operands
[i
].isvec
= 1;
6567 inst
.operands
[i
].issingle
= (rtype
== REG_TYPE_VFS
);
6568 inst
.operands
[i
].vectype
= optype
;
6569 inst
.operands
[i
].present
= 1;
6571 if (rtype
== REG_TYPE_VFS
)
6575 if (skip_past_comma (&ptr
) == FAIL
)
6577 if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
,
6580 first_error (_(reg_expected_msgs
[REG_TYPE_VFS
]));
6583 inst
.operands
[i
].reg
= val
;
6584 inst
.operands
[i
].isreg
= 1;
6585 inst
.operands
[i
].isvec
= 1;
6586 inst
.operands
[i
].issingle
= 1;
6587 inst
.operands
[i
].vectype
= optype
;
6588 inst
.operands
[i
].present
= 1;
6591 else if ((val
= arm_typed_reg_parse (&ptr
, REG_TYPE_VFS
, NULL
, &optype
))
6595 inst
.operands
[i
].reg
= val
;
6596 inst
.operands
[i
].isreg
= 1;
6597 inst
.operands
[i
].isvec
= 1;
6598 inst
.operands
[i
].issingle
= 1;
6599 inst
.operands
[i
].vectype
= optype
;
6600 inst
.operands
[i
].present
= 1;
6605 first_error (_("parse error"));
6609 /* Successfully parsed the operands. Update args. */
6615 first_error (_("expected comma"));
6619 first_error (_(reg_expected_msgs
[REG_TYPE_RN
]));
6623 /* Use this macro when the operand constraints are different
6624 for ARM and THUMB (e.g. ldrd). */
6625 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6626 ((arm_operand) | ((thumb_operand) << 16))
6628 /* Matcher codes for parse_operands. */
6629 enum operand_parse_code
6631 OP_stop
, /* end of line */
6633 OP_RR
, /* ARM register */
6634 OP_RRnpc
, /* ARM register, not r15 */
6635 OP_RRnpcsp
, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6636 OP_RRnpcb
, /* ARM register, not r15, in square brackets */
6637 OP_RRnpctw
, /* ARM register, not r15 in Thumb-state or with writeback,
6638 optional trailing ! */
6639 OP_RRw
, /* ARM register, not r15, optional trailing ! */
6640 OP_RCP
, /* Coprocessor number */
6641 OP_RCN
, /* Coprocessor register */
6642 OP_RF
, /* FPA register */
6643 OP_RVS
, /* VFP single precision register */
6644 OP_RVD
, /* VFP double precision register (0..15) */
6645 OP_RND
, /* Neon double precision register (0..31) */
6646 OP_RNQ
, /* Neon quad precision register */
6647 OP_RVSD
, /* VFP single or double precision register */
6648 OP_RNSD
, /* Neon single or double precision register */
6649 OP_RNDQ
, /* Neon double or quad precision register */
6650 OP_RNSDQ
, /* Neon single, double or quad precision register */
6651 OP_RNSC
, /* Neon scalar D[X] */
6652 OP_RVC
, /* VFP control register */
6653 OP_RMF
, /* Maverick F register */
6654 OP_RMD
, /* Maverick D register */
6655 OP_RMFX
, /* Maverick FX register */
6656 OP_RMDX
, /* Maverick DX register */
6657 OP_RMAX
, /* Maverick AX register */
6658 OP_RMDS
, /* Maverick DSPSC register */
6659 OP_RIWR
, /* iWMMXt wR register */
6660 OP_RIWC
, /* iWMMXt wC register */
6661 OP_RIWG
, /* iWMMXt wCG register */
6662 OP_RXA
, /* XScale accumulator register */
6664 /* New operands for Armv8.1-M Mainline. */
6665 OP_LR
, /* ARM LR register */
6666 OP_RRnpcsp_I32
, /* ARM register (no BadReg) or literal 1 .. 32 */
6668 OP_REGLST
, /* ARM register list */
6669 OP_CLRMLST
, /* CLRM register list */
6670 OP_VRSLST
, /* VFP single-precision register list */
6671 OP_VRDLST
, /* VFP double-precision register list */
6672 OP_VRSDLST
, /* VFP single or double-precision register list (& quad) */
6673 OP_NRDLST
, /* Neon double-precision register list (d0-d31, qN aliases) */
6674 OP_NSTRLST
, /* Neon element/structure list */
6675 OP_VRSDVLST
, /* VFP single or double-precision register list and VPR */
6677 OP_RNDQ_I0
, /* Neon D or Q reg, or immediate zero. */
6678 OP_RVSD_I0
, /* VFP S or D reg, or immediate zero. */
6679 OP_RSVD_FI0
, /* VFP S or D reg, or floating point immediate zero. */
6680 OP_RR_RNSC
, /* ARM reg or Neon scalar. */
6681 OP_RNSD_RNSC
, /* Neon S or D reg, or Neon scalar. */
6682 OP_RNSDQ_RNSC
, /* Vector S, D or Q reg, or Neon scalar. */
6683 OP_RNDQ_RNSC
, /* Neon D or Q reg, or Neon scalar. */
6684 OP_RND_RNSC
, /* Neon D reg, or Neon scalar. */
6685 OP_VMOV
, /* Neon VMOV operands. */
6686 OP_RNDQ_Ibig
, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6687 OP_RNDQ_I63b
, /* Neon D or Q reg, or immediate for shift. */
6688 OP_RIWR_I32z
, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6689 OP_VLDR
, /* VLDR operand. */
6691 OP_I0
, /* immediate zero */
6692 OP_I7
, /* immediate value 0 .. 7 */
6693 OP_I15
, /* 0 .. 15 */
6694 OP_I16
, /* 1 .. 16 */
6695 OP_I16z
, /* 0 .. 16 */
6696 OP_I31
, /* 0 .. 31 */
6697 OP_I31w
, /* 0 .. 31, optional trailing ! */
6698 OP_I32
, /* 1 .. 32 */
6699 OP_I32z
, /* 0 .. 32 */
6700 OP_I63
, /* 0 .. 63 */
6701 OP_I63s
, /* -64 .. 63 */
6702 OP_I64
, /* 1 .. 64 */
6703 OP_I64z
, /* 0 .. 64 */
6704 OP_I255
, /* 0 .. 255 */
6706 OP_I4b
, /* immediate, prefix optional, 1 .. 4 */
6707 OP_I7b
, /* 0 .. 7 */
6708 OP_I15b
, /* 0 .. 15 */
6709 OP_I31b
, /* 0 .. 31 */
6711 OP_SH
, /* shifter operand */
6712 OP_SHG
, /* shifter operand with possible group relocation */
6713 OP_ADDR
, /* Memory address expression (any mode) */
6714 OP_ADDRGLDR
, /* Mem addr expr (any mode) with possible LDR group reloc */
6715 OP_ADDRGLDRS
, /* Mem addr expr (any mode) with possible LDRS group reloc */
6716 OP_ADDRGLDC
, /* Mem addr expr (any mode) with possible LDC group reloc */
6717 OP_EXP
, /* arbitrary expression */
6718 OP_EXPi
, /* same, with optional immediate prefix */
6719 OP_EXPr
, /* same, with optional relocation suffix */
6720 OP_EXPs
, /* same, with optional non-first operand relocation suffix */
6721 OP_HALF
, /* 0 .. 65535 or low/high reloc. */
6722 OP_IROT1
, /* VCADD rotate immediate: 90, 270. */
6723 OP_IROT2
, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6725 OP_CPSF
, /* CPS flags */
6726 OP_ENDI
, /* Endianness specifier */
6727 OP_wPSR
, /* CPSR/SPSR/APSR mask for msr (writing). */
6728 OP_rPSR
, /* CPSR/SPSR/APSR mask for msr (reading). */
6729 OP_COND
, /* conditional code */
6730 OP_TB
, /* Table branch. */
6732 OP_APSR_RR
, /* ARM register or "APSR_nzcv". */
6734 OP_RRnpc_I0
, /* ARM register or literal 0 */
6735 OP_RR_EXr
, /* ARM register or expression with opt. reloc stuff. */
6736 OP_RR_EXi
, /* ARM register or expression with imm prefix */
6737 OP_RF_IF
, /* FPA register or immediate */
6738 OP_RIWR_RIWC
, /* iWMMXt R or C reg */
6739 OP_RIWC_RIWG
, /* iWMMXt wC or wCG reg */
6741 /* Optional operands. */
6742 OP_oI7b
, /* immediate, prefix optional, 0 .. 7 */
6743 OP_oI31b
, /* 0 .. 31 */
6744 OP_oI32b
, /* 1 .. 32 */
6745 OP_oI32z
, /* 0 .. 32 */
6746 OP_oIffffb
, /* 0 .. 65535 */
6747 OP_oI255c
, /* curly-brace enclosed, 0 .. 255 */
6749 OP_oRR
, /* ARM register */
6750 OP_oLR
, /* ARM LR register */
6751 OP_oRRnpc
, /* ARM register, not the PC */
6752 OP_oRRnpcsp
, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6753 OP_oRRw
, /* ARM register, not r15, optional trailing ! */
6754 OP_oRND
, /* Optional Neon double precision register */
6755 OP_oRNQ
, /* Optional Neon quad precision register */
6756 OP_oRNDQ
, /* Optional Neon double or quad precision register */
6757 OP_oRNSDQ
, /* Optional single, double or quad precision vector register */
6758 OP_oSHll
, /* LSL immediate */
6759 OP_oSHar
, /* ASR immediate */
6760 OP_oSHllar
, /* LSL or ASR immediate */
6761 OP_oROR
, /* ROR 0/8/16/24 */
6762 OP_oBARRIER_I15
, /* Option argument for a barrier instruction. */
6764 /* Some pre-defined mixed (ARM/THUMB) operands. */
6765 OP_RR_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RR
, OP_RRnpcsp
),
6766 OP_RRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_RRnpc
, OP_RRnpcsp
),
6767 OP_oRRnpc_npcsp
= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc
, OP_oRRnpcsp
),
6769 OP_FIRST_OPTIONAL
= OP_oI7b
6772 /* Generic instruction operand parser. This does no encoding and no
6773 semantic validation; it merely squirrels values away in the inst
6774 structure. Returns SUCCESS or FAIL depending on whether the
6775 specified grammar matched. */
6777 parse_operands (char *str
, const unsigned int *pattern
, bfd_boolean thumb
)
6779 unsigned const int *upat
= pattern
;
6780 char *backtrack_pos
= 0;
6781 const char *backtrack_error
= 0;
6782 int i
, val
= 0, backtrack_index
= 0;
6783 enum arm_reg_type rtype
;
6784 parse_operand_result result
;
6785 unsigned int op_parse_code
;
6786 bfd_boolean partial_match
;
6788 #define po_char_or_fail(chr) \
6791 if (skip_past_char (&str, chr) == FAIL) \
6796 #define po_reg_or_fail(regtype) \
6799 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6800 & inst.operands[i].vectype); \
6803 first_error (_(reg_expected_msgs[regtype])); \
6806 inst.operands[i].reg = val; \
6807 inst.operands[i].isreg = 1; \
6808 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6809 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6810 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6811 || rtype == REG_TYPE_VFD \
6812 || rtype == REG_TYPE_NQ); \
6816 #define po_reg_or_goto(regtype, label) \
6819 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6820 & inst.operands[i].vectype); \
6824 inst.operands[i].reg = val; \
6825 inst.operands[i].isreg = 1; \
6826 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6827 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6828 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6829 || rtype == REG_TYPE_VFD \
6830 || rtype == REG_TYPE_NQ); \
6834 #define po_imm_or_fail(min, max, popt) \
6837 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6839 inst.operands[i].imm = val; \
6843 #define po_scalar_or_goto(elsz, label) \
6846 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6849 inst.operands[i].reg = val; \
6850 inst.operands[i].isscalar = 1; \
6854 #define po_misc_or_fail(expr) \
6862 #define po_misc_or_fail_no_backtrack(expr) \
6866 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6867 backtrack_pos = 0; \
6868 if (result != PARSE_OPERAND_SUCCESS) \
6873 #define po_barrier_or_imm(str) \
6876 val = parse_barrier (&str); \
6877 if (val == FAIL && ! ISALPHA (*str)) \
6880 /* ISB can only take SY as an option. */ \
6881 || ((inst.instruction & 0xf0) == 0x60 \
6884 inst.error = _("invalid barrier type"); \
6885 backtrack_pos = 0; \
6891 skip_whitespace (str
);
6893 for (i
= 0; upat
[i
] != OP_stop
; i
++)
6895 op_parse_code
= upat
[i
];
6896 if (op_parse_code
>= 1<<16)
6897 op_parse_code
= thumb
? (op_parse_code
>> 16)
6898 : (op_parse_code
& ((1<<16)-1));
6900 if (op_parse_code
>= OP_FIRST_OPTIONAL
)
6902 /* Remember where we are in case we need to backtrack. */
6903 gas_assert (!backtrack_pos
);
6904 backtrack_pos
= str
;
6905 backtrack_error
= inst
.error
;
6906 backtrack_index
= i
;
6909 if (i
> 0 && (i
> 1 || inst
.operands
[0].present
))
6910 po_char_or_fail (',');
6912 switch (op_parse_code
)
6922 case OP_RR
: po_reg_or_fail (REG_TYPE_RN
); break;
6923 case OP_RCP
: po_reg_or_fail (REG_TYPE_CP
); break;
6924 case OP_RCN
: po_reg_or_fail (REG_TYPE_CN
); break;
6925 case OP_RF
: po_reg_or_fail (REG_TYPE_FN
); break;
6926 case OP_RVS
: po_reg_or_fail (REG_TYPE_VFS
); break;
6927 case OP_RVD
: po_reg_or_fail (REG_TYPE_VFD
); break;
6929 case OP_RND
: po_reg_or_fail (REG_TYPE_VFD
); break;
6931 po_reg_or_goto (REG_TYPE_VFC
, coproc_reg
);
6933 /* Also accept generic coprocessor regs for unknown registers. */
6935 po_reg_or_fail (REG_TYPE_CN
);
6937 case OP_RMF
: po_reg_or_fail (REG_TYPE_MVF
); break;
6938 case OP_RMD
: po_reg_or_fail (REG_TYPE_MVD
); break;
6939 case OP_RMFX
: po_reg_or_fail (REG_TYPE_MVFX
); break;
6940 case OP_RMDX
: po_reg_or_fail (REG_TYPE_MVDX
); break;
6941 case OP_RMAX
: po_reg_or_fail (REG_TYPE_MVAX
); break;
6942 case OP_RMDS
: po_reg_or_fail (REG_TYPE_DSPSC
); break;
6943 case OP_RIWR
: po_reg_or_fail (REG_TYPE_MMXWR
); break;
6944 case OP_RIWC
: po_reg_or_fail (REG_TYPE_MMXWC
); break;
6945 case OP_RIWG
: po_reg_or_fail (REG_TYPE_MMXWCG
); break;
6946 case OP_RXA
: po_reg_or_fail (REG_TYPE_XSCALE
); break;
6948 case OP_RNQ
: po_reg_or_fail (REG_TYPE_NQ
); break;
6949 case OP_RNSD
: po_reg_or_fail (REG_TYPE_NSD
); break;
6951 case OP_RNDQ
: po_reg_or_fail (REG_TYPE_NDQ
); break;
6952 case OP_RVSD
: po_reg_or_fail (REG_TYPE_VFSD
); break;
6954 case OP_RNSDQ
: po_reg_or_fail (REG_TYPE_NSDQ
); break;
6956 /* Neon scalar. Using an element size of 8 means that some invalid
6957 scalars are accepted here, so deal with those in later code. */
6958 case OP_RNSC
: po_scalar_or_goto (8, failure
); break;
6962 po_reg_or_goto (REG_TYPE_NDQ
, try_imm0
);
6965 po_imm_or_fail (0, 0, TRUE
);
6970 po_reg_or_goto (REG_TYPE_VFSD
, try_imm0
);
6975 po_reg_or_goto (REG_TYPE_VFSD
, try_ifimm0
);
6978 if (parse_ifimm_zero (&str
))
6979 inst
.operands
[i
].imm
= 0;
6983 = _("only floating point zero is allowed as immediate value");
6991 po_scalar_or_goto (8, try_rr
);
6994 po_reg_or_fail (REG_TYPE_RN
);
7000 po_scalar_or_goto (8, try_nsdq
);
7003 po_reg_or_fail (REG_TYPE_NSDQ
);
7009 po_scalar_or_goto (8, try_s_scalar
);
7012 po_scalar_or_goto (4, try_nsd
);
7015 po_reg_or_fail (REG_TYPE_NSD
);
7021 po_scalar_or_goto (8, try_ndq
);
7024 po_reg_or_fail (REG_TYPE_NDQ
);
7030 po_scalar_or_goto (8, try_vfd
);
7033 po_reg_or_fail (REG_TYPE_VFD
);
7038 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7039 not careful then bad things might happen. */
7040 po_misc_or_fail (parse_neon_mov (&str
, &i
) == FAIL
);
7045 po_reg_or_goto (REG_TYPE_NDQ
, try_immbig
);
7048 /* There's a possibility of getting a 64-bit immediate here, so
7049 we need special handling. */
7050 if (parse_big_immediate (&str
, i
, NULL
, /*allow_symbol_p=*/FALSE
)
7053 inst
.error
= _("immediate value is out of range");
7061 po_reg_or_goto (REG_TYPE_NDQ
, try_shimm
);
7064 po_imm_or_fail (0, 63, TRUE
);
7069 po_char_or_fail ('[');
7070 po_reg_or_fail (REG_TYPE_RN
);
7071 po_char_or_fail (']');
7077 po_reg_or_fail (REG_TYPE_RN
);
7078 if (skip_past_char (&str
, '!') == SUCCESS
)
7079 inst
.operands
[i
].writeback
= 1;
7083 case OP_I7
: po_imm_or_fail ( 0, 7, FALSE
); break;
7084 case OP_I15
: po_imm_or_fail ( 0, 15, FALSE
); break;
7085 case OP_I16
: po_imm_or_fail ( 1, 16, FALSE
); break;
7086 case OP_I16z
: po_imm_or_fail ( 0, 16, FALSE
); break;
7087 case OP_I31
: po_imm_or_fail ( 0, 31, FALSE
); break;
7088 case OP_I32
: po_imm_or_fail ( 1, 32, FALSE
); break;
7089 case OP_I32z
: po_imm_or_fail ( 0, 32, FALSE
); break;
7090 case OP_I63s
: po_imm_or_fail (-64, 63, FALSE
); break;
7091 case OP_I63
: po_imm_or_fail ( 0, 63, FALSE
); break;
7092 case OP_I64
: po_imm_or_fail ( 1, 64, FALSE
); break;
7093 case OP_I64z
: po_imm_or_fail ( 0, 64, FALSE
); break;
7094 case OP_I255
: po_imm_or_fail ( 0, 255, FALSE
); break;
7096 case OP_I4b
: po_imm_or_fail ( 1, 4, TRUE
); break;
7098 case OP_I7b
: po_imm_or_fail ( 0, 7, TRUE
); break;
7099 case OP_I15b
: po_imm_or_fail ( 0, 15, TRUE
); break;
7101 case OP_I31b
: po_imm_or_fail ( 0, 31, TRUE
); break;
7102 case OP_oI32b
: po_imm_or_fail ( 1, 32, TRUE
); break;
7103 case OP_oI32z
: po_imm_or_fail ( 0, 32, TRUE
); break;
7104 case OP_oIffffb
: po_imm_or_fail ( 0, 0xffff, TRUE
); break;
7106 /* Immediate variants */
7108 po_char_or_fail ('{');
7109 po_imm_or_fail (0, 255, TRUE
);
7110 po_char_or_fail ('}');
7114 /* The expression parser chokes on a trailing !, so we have
7115 to find it first and zap it. */
7118 while (*s
&& *s
!= ',')
7123 inst
.operands
[i
].writeback
= 1;
7125 po_imm_or_fail (0, 31, TRUE
);
7133 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7138 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7143 po_misc_or_fail (my_get_expression (&inst
.relocs
[0].exp
, &str
,
7145 if (inst
.relocs
[0].exp
.X_op
== O_symbol
)
7147 val
= parse_reloc (&str
);
7150 inst
.error
= _("unrecognized relocation suffix");
7153 else if (val
!= BFD_RELOC_UNUSED
)
7155 inst
.operands
[i
].imm
= val
;
7156 inst
.operands
[i
].hasreloc
= 1;
7162 po_misc_or_fail (my_get_expression (&inst
.relocs
[i
].exp
, &str
,
7164 if (inst
.relocs
[i
].exp
.X_op
== O_symbol
)
7166 inst
.operands
[i
].hasreloc
= 1;
7168 else if (inst
.relocs
[i
].exp
.X_op
== O_constant
)
7170 inst
.operands
[i
].imm
= inst
.relocs
[i
].exp
.X_add_number
;
7171 inst
.operands
[i
].hasreloc
= 0;
7175 /* Operand for MOVW or MOVT. */
7177 po_misc_or_fail (parse_half (&str
));
7180 /* Register or expression. */
7181 case OP_RR_EXr
: po_reg_or_goto (REG_TYPE_RN
, EXPr
); break;
7182 case OP_RR_EXi
: po_reg_or_goto (REG_TYPE_RN
, EXPi
); break;
7184 /* Register or immediate. */
7185 case OP_RRnpc_I0
: po_reg_or_goto (REG_TYPE_RN
, I0
); break;
7186 I0
: po_imm_or_fail (0, 0, FALSE
); break;
7188 case OP_RF_IF
: po_reg_or_goto (REG_TYPE_FN
, IF
); break;
7190 if (!is_immediate_prefix (*str
))
7193 val
= parse_fpa_immediate (&str
);
7196 /* FPA immediates are encoded as registers 8-15.
7197 parse_fpa_immediate has already applied the offset. */
7198 inst
.operands
[i
].reg
= val
;
7199 inst
.operands
[i
].isreg
= 1;
7202 case OP_RIWR_I32z
: po_reg_or_goto (REG_TYPE_MMXWR
, I32z
); break;
7203 I32z
: po_imm_or_fail (0, 32, FALSE
); break;
7205 /* Two kinds of register. */
7208 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7210 || (rege
->type
!= REG_TYPE_MMXWR
7211 && rege
->type
!= REG_TYPE_MMXWC
7212 && rege
->type
!= REG_TYPE_MMXWCG
))
7214 inst
.error
= _("iWMMXt data or control register expected");
7217 inst
.operands
[i
].reg
= rege
->number
;
7218 inst
.operands
[i
].isreg
= (rege
->type
== REG_TYPE_MMXWR
);
7224 struct reg_entry
*rege
= arm_reg_parse_multi (&str
);
7226 || (rege
->type
!= REG_TYPE_MMXWC
7227 && rege
->type
!= REG_TYPE_MMXWCG
))
7229 inst
.error
= _("iWMMXt control register expected");
7232 inst
.operands
[i
].reg
= rege
->number
;
7233 inst
.operands
[i
].isreg
= 1;
7238 case OP_CPSF
: val
= parse_cps_flags (&str
); break;
7239 case OP_ENDI
: val
= parse_endian_specifier (&str
); break;
7240 case OP_oROR
: val
= parse_ror (&str
); break;
7241 case OP_COND
: val
= parse_cond (&str
); break;
7242 case OP_oBARRIER_I15
:
7243 po_barrier_or_imm (str
); break;
7245 if (parse_immediate (&str
, &val
, 0, 15, TRUE
) == FAIL
)
7251 po_reg_or_goto (REG_TYPE_RNB
, try_psr
);
7252 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_virt
))
7254 inst
.error
= _("Banked registers are not available with this "
7260 val
= parse_psr (&str
, op_parse_code
== OP_wPSR
);
7264 po_reg_or_goto (REG_TYPE_VFSD
, try_sysreg
);
7267 val
= parse_sys_vldr_vstr (&str
);
7271 po_reg_or_goto (REG_TYPE_RN
, try_apsr
);
7274 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7276 if (strncasecmp (str
, "APSR_", 5) == 0)
7283 case 'c': found
= (found
& 1) ? 16 : found
| 1; break;
7284 case 'n': found
= (found
& 2) ? 16 : found
| 2; break;
7285 case 'z': found
= (found
& 4) ? 16 : found
| 4; break;
7286 case 'v': found
= (found
& 8) ? 16 : found
| 8; break;
7287 default: found
= 16;
7291 inst
.operands
[i
].isvec
= 1;
7292 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7293 inst
.operands
[i
].reg
= REG_PC
;
7300 po_misc_or_fail (parse_tb (&str
));
7303 /* Register lists. */
7305 val
= parse_reg_list (&str
, REGLIST_RN
);
7308 inst
.operands
[i
].writeback
= 1;
7314 val
= parse_reg_list (&str
, REGLIST_CLRM
);
7318 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_S
,
7323 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
, REGLIST_VFP_D
,
7328 /* Allow Q registers too. */
7329 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7330 REGLIST_NEON_D
, &partial_match
);
7334 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7335 REGLIST_VFP_S
, &partial_match
);
7336 inst
.operands
[i
].issingle
= 1;
7341 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7342 REGLIST_VFP_D_VPR
, &partial_match
);
7343 if (val
== FAIL
&& !partial_match
)
7346 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7347 REGLIST_VFP_S_VPR
, &partial_match
);
7348 inst
.operands
[i
].issingle
= 1;
7353 val
= parse_vfp_reg_list (&str
, &inst
.operands
[i
].reg
,
7354 REGLIST_NEON_D
, &partial_match
);
7358 val
= parse_neon_el_struct_list (&str
, &inst
.operands
[i
].reg
,
7359 &inst
.operands
[i
].vectype
);
7362 /* Addressing modes */
7364 po_misc_or_fail (parse_address (&str
, i
));
7368 po_misc_or_fail_no_backtrack (
7369 parse_address_group_reloc (&str
, i
, GROUP_LDR
));
7373 po_misc_or_fail_no_backtrack (
7374 parse_address_group_reloc (&str
, i
, GROUP_LDRS
));
7378 po_misc_or_fail_no_backtrack (
7379 parse_address_group_reloc (&str
, i
, GROUP_LDC
));
7383 po_misc_or_fail (parse_shifter_operand (&str
, i
));
7387 po_misc_or_fail_no_backtrack (
7388 parse_shifter_operand_group_reloc (&str
, i
));
7392 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_IMMEDIATE
));
7396 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_ASR_IMMEDIATE
));
7400 po_misc_or_fail (parse_shift (&str
, i
, SHIFT_LSL_OR_ASR_IMMEDIATE
));
7404 as_fatal (_("unhandled operand code %d"), op_parse_code
);
7407 /* Various value-based sanity checks and shared operations. We
7408 do not signal immediate failures for the register constraints;
7409 this allows a syntax error to take precedence. */
7410 switch (op_parse_code
)
7418 if (inst
.operands
[i
].isreg
&& inst
.operands
[i
].reg
== REG_PC
)
7419 inst
.error
= BAD_PC
;
7424 if (inst
.operands
[i
].isreg
)
7426 if (inst
.operands
[i
].reg
== REG_PC
)
7427 inst
.error
= BAD_PC
;
7428 else if (inst
.operands
[i
].reg
== REG_SP
7429 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7430 relaxed since ARMv8-A. */
7431 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
7434 inst
.error
= BAD_SP
;
7440 if (inst
.operands
[i
].isreg
7441 && inst
.operands
[i
].reg
== REG_PC
7442 && (inst
.operands
[i
].writeback
|| thumb
))
7443 inst
.error
= BAD_PC
;
7447 if (inst
.operands
[i
].isreg
)
7456 case OP_oBARRIER_I15
:
7467 inst
.operands
[i
].imm
= val
;
7472 if (inst
.operands
[i
].reg
!= REG_LR
)
7473 inst
.error
= _("operand must be LR register");
7480 /* If we get here, this operand was successfully parsed. */
7481 inst
.operands
[i
].present
= 1;
7485 inst
.error
= BAD_ARGS
;
7490 /* The parse routine should already have set inst.error, but set a
7491 default here just in case. */
7493 inst
.error
= _("syntax error");
7497 /* Do not backtrack over a trailing optional argument that
7498 absorbed some text. We will only fail again, with the
7499 'garbage following instruction' error message, which is
7500 probably less helpful than the current one. */
7501 if (backtrack_index
== i
&& backtrack_pos
!= str
7502 && upat
[i
+1] == OP_stop
)
7505 inst
.error
= _("syntax error");
7509 /* Try again, skipping the optional argument at backtrack_pos. */
7510 str
= backtrack_pos
;
7511 inst
.error
= backtrack_error
;
7512 inst
.operands
[backtrack_index
].present
= 0;
7513 i
= backtrack_index
;
7517 /* Check that we have parsed all the arguments. */
7518 if (*str
!= '\0' && !inst
.error
)
7519 inst
.error
= _("garbage following instruction");
7521 return inst
.error
? FAIL
: SUCCESS
;
7524 #undef po_char_or_fail
7525 #undef po_reg_or_fail
7526 #undef po_reg_or_goto
7527 #undef po_imm_or_fail
7528 #undef po_scalar_or_fail
7529 #undef po_barrier_or_imm
/* Shorthand macro for instruction encoding functions issuing errors.
   Evaluates EXPR; on failure records ERR in inst.error and returns from
   the enclosing (void) encoding function.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.  */
#define reject_bad_reg(reg)				\
  do							\
   if (reg == REG_PC)					\
     {							\
       inst.error = BAD_PC;				\
       return;						\
     }							\
   else if (reg == REG_SP				\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
     {							\
       inst.error = BAD_SP;				\
       return;						\
     }							\
  while (0)
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits (N reduced modulo 32).
   Arguments are fully parenthesized so expression arguments expand
   safely; V should be an unsigned 32-bit value.  */
#define rotate_left(v, n) (((v) << ((n) & 31)) | ((v) >> ((32 - (n)) & 31)))
7576 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7578 The only binary encoding difference is the Coprocessor number. Coprocessor
7579 9 is used for half-precision calculations or conversions. The format of the
7580 instruction is the same as the equivalent Coprocessor 10 instruction that
7581 exists for Single-Precision operation. */
7584 do_scalar_fp16_v82_encode (void)
7586 if (inst
.cond
!= COND_ALWAYS
)
7587 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7588 " the behaviour is UNPREDICTABLE"));
7589 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
7592 inst
.instruction
= (inst
.instruction
& 0xfffff0ff) | 0x900;
7593 mark_feature_used (&arm_ext_fp16
);
7596 /* If VAL can be encoded in the immediate field of an ARM instruction,
7597 return the encoded form. Otherwise, return FAIL. */
7600 encode_arm_immediate (unsigned int val
)
7607 for (i
= 2; i
< 32; i
+= 2)
7608 if ((a
= rotate_left (val
, i
)) <= 0xff)
7609 return a
| (i
<< 7); /* 12-bit pack: [shift-cnt,const]. */
7614 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7615 return the encoded form. Otherwise, return FAIL. */
7617 encode_thumb32_immediate (unsigned int val
)
7624 for (i
= 1; i
<= 24; i
++)
7627 if ((val
& ~(0xff << i
)) == 0)
7628 return ((val
>> i
) & 0x7f) | ((32 - i
) << 7);
7632 if (val
== ((a
<< 16) | a
))
7634 if (val
== ((a
<< 24) | (a
<< 16) | (a
<< 8) | a
))
7638 if (val
== ((a
<< 16) | a
))
7639 return 0x200 | (a
>> 8);
7643 /* Encode a VFP SP or DP register number into inst.instruction. */
7646 encode_arm_vfp_reg (int reg
, enum vfp_reg_pos pos
)
7648 if ((pos
== VFP_REG_Dd
|| pos
== VFP_REG_Dn
|| pos
== VFP_REG_Dm
)
7651 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_d32
))
7654 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
7657 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
7662 first_error (_("D register out of range for selected VFP version"));
7670 inst
.instruction
|= ((reg
>> 1) << 12) | ((reg
& 1) << 22);
7674 inst
.instruction
|= ((reg
>> 1) << 16) | ((reg
& 1) << 7);
7678 inst
.instruction
|= ((reg
>> 1) << 0) | ((reg
& 1) << 5);
7682 inst
.instruction
|= ((reg
& 15) << 12) | ((reg
>> 4) << 22);
7686 inst
.instruction
|= ((reg
& 15) << 16) | ((reg
>> 4) << 7);
7690 inst
.instruction
|= (reg
& 15) | ((reg
>> 4) << 5);
7698 /* Encode a <shift> in an ARM-format instruction. The immediate,
7699 if any, is handled by md_apply_fix. */
7701 encode_arm_shift (int i
)
7703 /* register-shifted register. */
7704 if (inst
.operands
[i
].immisreg
)
7707 for (op_index
= 0; op_index
<= i
; ++op_index
)
7709 /* Check the operand only when it's presented. In pre-UAL syntax,
7710 if the destination register is the same as the first operand, two
7711 register form of the instruction can be used. */
7712 if (inst
.operands
[op_index
].present
&& inst
.operands
[op_index
].isreg
7713 && inst
.operands
[op_index
].reg
== REG_PC
)
7714 as_warn (UNPRED_REG ("r15"));
7717 if (inst
.operands
[i
].imm
== REG_PC
)
7718 as_warn (UNPRED_REG ("r15"));
7721 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7722 inst
.instruction
|= SHIFT_ROR
<< 5;
7725 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7726 if (inst
.operands
[i
].immisreg
)
7728 inst
.instruction
|= SHIFT_BY_REG
;
7729 inst
.instruction
|= inst
.operands
[i
].imm
<< 8;
7732 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7737 encode_arm_shifter_operand (int i
)
7739 if (inst
.operands
[i
].isreg
)
7741 inst
.instruction
|= inst
.operands
[i
].reg
;
7742 encode_arm_shift (i
);
7746 inst
.instruction
|= INST_IMMEDIATE
;
7747 if (inst
.relocs
[0].type
!= BFD_RELOC_ARM_IMMEDIATE
)
7748 inst
.instruction
|= inst
.operands
[i
].imm
;
7752 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7754 encode_arm_addr_mode_common (int i
, bfd_boolean is_t
)
7757 Generate an error if the operand is not a register. */
7758 constraint (!inst
.operands
[i
].isreg
,
7759 _("Instruction does not support =N addresses"));
7761 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
7763 if (inst
.operands
[i
].preind
)
7767 inst
.error
= _("instruction does not accept preindexed addressing");
7770 inst
.instruction
|= PRE_INDEX
;
7771 if (inst
.operands
[i
].writeback
)
7772 inst
.instruction
|= WRITE_BACK
;
7775 else if (inst
.operands
[i
].postind
)
7777 gas_assert (inst
.operands
[i
].writeback
);
7779 inst
.instruction
|= WRITE_BACK
;
7781 else /* unindexed - only for coprocessor */
7783 inst
.error
= _("instruction does not accept unindexed addressing");
7787 if (((inst
.instruction
& WRITE_BACK
) || !(inst
.instruction
& PRE_INDEX
))
7788 && (((inst
.instruction
& 0x000f0000) >> 16)
7789 == ((inst
.instruction
& 0x0000f000) >> 12)))
7790 as_warn ((inst
.instruction
& LOAD_BIT
)
7791 ? _("destination register same as write-back base")
7792 : _("source register same as write-back base"));
7795 /* inst.operands[i] was set up by parse_address. Encode it into an
7796 ARM-format mode 2 load or store instruction. If is_t is true,
7797 reject forms that cannot be used with a T instruction (i.e. not
7800 encode_arm_addr_mode_2 (int i
, bfd_boolean is_t
)
7802 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
7804 encode_arm_addr_mode_common (i
, is_t
);
7806 if (inst
.operands
[i
].immisreg
)
7808 constraint ((inst
.operands
[i
].imm
== REG_PC
7809 || (is_pc
&& inst
.operands
[i
].writeback
)),
7811 inst
.instruction
|= INST_IMMEDIATE
; /* yes, this is backwards */
7812 inst
.instruction
|= inst
.operands
[i
].imm
;
7813 if (!inst
.operands
[i
].negative
)
7814 inst
.instruction
|= INDEX_UP
;
7815 if (inst
.operands
[i
].shifted
)
7817 if (inst
.operands
[i
].shift_kind
== SHIFT_RRX
)
7818 inst
.instruction
|= SHIFT_ROR
<< 5;
7821 inst
.instruction
|= inst
.operands
[i
].shift_kind
<< 5;
7822 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
7826 else /* immediate offset in inst.relocs[0] */
7828 if (is_pc
&& !inst
.relocs
[0].pc_rel
)
7830 const bfd_boolean is_load
= ((inst
.instruction
& LOAD_BIT
) != 0);
7832 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7833 cannot use PC in addressing.
7834 PC cannot be used in writeback addressing, either. */
7835 constraint ((is_t
|| inst
.operands
[i
].writeback
),
7838 /* Use of PC in str is deprecated for ARMv7. */
7839 if (warn_on_deprecated
7841 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
))
7842 as_tsktsk (_("use of PC in this instruction is deprecated"));
7845 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7847 /* Prefer + for zero encoded value. */
7848 if (!inst
.operands
[i
].negative
)
7849 inst
.instruction
|= INDEX_UP
;
7850 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM
;
7855 /* inst.operands[i] was set up by parse_address. Encode it into an
7856 ARM-format mode 3 load or store instruction. Reject forms that
7857 cannot be used with such instructions. If is_t is true, reject
7858 forms that cannot be used with a T instruction (i.e. not
7861 encode_arm_addr_mode_3 (int i
, bfd_boolean is_t
)
7863 if (inst
.operands
[i
].immisreg
&& inst
.operands
[i
].shifted
)
7865 inst
.error
= _("instruction does not accept scaled register index");
7869 encode_arm_addr_mode_common (i
, is_t
);
7871 if (inst
.operands
[i
].immisreg
)
7873 constraint ((inst
.operands
[i
].imm
== REG_PC
7874 || (is_t
&& inst
.operands
[i
].reg
== REG_PC
)),
7876 constraint (inst
.operands
[i
].reg
== REG_PC
&& inst
.operands
[i
].writeback
,
7878 inst
.instruction
|= inst
.operands
[i
].imm
;
7879 if (!inst
.operands
[i
].negative
)
7880 inst
.instruction
|= INDEX_UP
;
7882 else /* immediate offset in inst.relocs[0] */
7884 constraint ((inst
.operands
[i
].reg
== REG_PC
&& !inst
.relocs
[0].pc_rel
7885 && inst
.operands
[i
].writeback
),
7887 inst
.instruction
|= HWOFFSET_IMM
;
7888 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
7890 /* Prefer + for zero encoded value. */
7891 if (!inst
.operands
[i
].negative
)
7892 inst
.instruction
|= INDEX_UP
;
7894 inst
.relocs
[0].type
= BFD_RELOC_ARM_OFFSET_IMM8
;
7899 /* Write immediate bits [7:0] to the following locations:
7901 |28/24|23 19|18 16|15 4|3 0|
7902 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7904 This function is used by VMOV/VMVN/VORR/VBIC. */
7907 neon_write_immbits (unsigned immbits
)
7909 inst
.instruction
|= immbits
& 0xf;
7910 inst
.instruction
|= ((immbits
>> 4) & 0x7) << 16;
7911 inst
.instruction
|= ((immbits
>> 7) & 0x1) << (thumb_mode
? 28 : 24);
/* Invert low-order SIZE bits of XHI:XLO.  Either pointer may be NULL,
   in which case that half is ignored.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned immlo = xlo ? *xlo : 0;
  unsigned immhi = xhi ? *xhi : 0;

  switch (size)
    {
    case 8:
      immlo = (~immlo) & 0xff;
      break;

    case 16:
      immlo = (~immlo) & 0xffff;
      break;

    case 64:
      immhi = (~immhi) & 0xffffffff;
      /* fall through.  */

    case 32:
      immlo = (~immlo) & 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = immlo;

  if (xhi)
    *xhi = immhi;
}
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: each byte of IMM is either all-zeros or all-ones.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
}
/* For immediate of above form, return 0bABCD: one bit per byte.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
	 | ((imm & 0x01000000) >> 21);
}
/* Compress quarter-float representation to 0b...000 abcdefgh:
   sign bit plus the top 7 significant exponent/mantissa bits.  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
}
7980 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7981 the instruction. *OP is passed as the initial value of the op field, and
7982 may be set to a different value depending on the constant (i.e.
7983 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7984 MVN). If the immediate looks like a repeated pattern then also
7985 try smaller element sizes. */
7988 neon_cmode_for_move_imm (unsigned immlo
, unsigned immhi
, int float_p
,
7989 unsigned *immbits
, int *op
, int size
,
7990 enum neon_el_type type
)
7992 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7994 if (type
== NT_float
&& !float_p
)
7997 if (type
== NT_float
&& is_quarter_float (immlo
) && immhi
== 0)
7999 if (size
!= 32 || *op
== 1)
8001 *immbits
= neon_qfloat_bits (immlo
);
8007 if (neon_bits_same_in_bytes (immhi
)
8008 && neon_bits_same_in_bytes (immlo
))
8012 *immbits
= (neon_squash_bits (immhi
) << 4)
8013 | neon_squash_bits (immlo
);
8024 if (immlo
== (immlo
& 0x000000ff))
8029 else if (immlo
== (immlo
& 0x0000ff00))
8031 *immbits
= immlo
>> 8;
8034 else if (immlo
== (immlo
& 0x00ff0000))
8036 *immbits
= immlo
>> 16;
8039 else if (immlo
== (immlo
& 0xff000000))
8041 *immbits
= immlo
>> 24;
8044 else if (immlo
== ((immlo
& 0x0000ff00) | 0x000000ff))
8046 *immbits
= (immlo
>> 8) & 0xff;
8049 else if (immlo
== ((immlo
& 0x00ff0000) | 0x0000ffff))
8051 *immbits
= (immlo
>> 16) & 0xff;
8055 if ((immlo
& 0xffff) != (immlo
>> 16))
8062 if (immlo
== (immlo
& 0x000000ff))
8067 else if (immlo
== (immlo
& 0x0000ff00))
8069 *immbits
= immlo
>> 8;
8073 if ((immlo
& 0xff) != (immlo
>> 8))
8078 if (immlo
== (immlo
& 0x000000ff))
8080 /* Don't allow MVN with 8-bit immediate. */
8090 #if defined BFD_HOST_64_BIT
8091 /* Returns TRUE if double precision value V may be cast
8092 to single precision without loss of accuracy. */
8095 is_double_a_single (bfd_int64_t v
)
8097 int exp
= (int)((v
>> 52) & 0x7FF);
8098 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8100 return (exp
== 0 || exp
== 0x7FF
8101 || (exp
>= 1023 - 126 && exp
<= 1023 + 127))
8102 && (mantissa
& 0x1FFFFFFFl
) == 0;
8105 /* Returns a double precision value casted to single precision
8106 (ignoring the least significant bits in exponent and mantissa). */
8109 double_to_single (bfd_int64_t v
)
8111 int sign
= (int) ((v
>> 63) & 1l);
8112 int exp
= (int) ((v
>> 52) & 0x7FF);
8113 bfd_int64_t mantissa
= (v
& (bfd_int64_t
)0xFFFFFFFFFFFFFULL
);
8119 exp
= exp
- 1023 + 127;
8128 /* No denormalized numbers. */
8134 return (sign
<< 31) | (exp
<< 23) | mantissa
;
8136 #endif /* BFD_HOST_64_BIT */
8145 static void do_vfp_nsyn_opcode (const char *);
8147 /* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8148 Determine whether it can be performed with a move instruction; if
8149 it can, convert inst.instruction to that move instruction and
8150 return TRUE; if it can't, convert inst.instruction to a literal-pool
8151 load and return FALSE. If this is not a valid thing to do in the
8152 current context, set inst.error and return TRUE.
8154 inst.operands[i] describes the destination register. */
8157 move_or_literal_pool (int i
, enum lit_type t
, bfd_boolean mode_3
)
8160 bfd_boolean thumb_p
= (t
== CONST_THUMB
);
8161 bfd_boolean arm_p
= (t
== CONST_ARM
);
8164 tbit
= (inst
.instruction
> 0xffff) ? THUMB2_LOAD_BIT
: THUMB_LOAD_BIT
;
8168 if ((inst
.instruction
& tbit
) == 0)
8170 inst
.error
= _("invalid pseudo operation");
8174 if (inst
.relocs
[0].exp
.X_op
!= O_constant
8175 && inst
.relocs
[0].exp
.X_op
!= O_symbol
8176 && inst
.relocs
[0].exp
.X_op
!= O_big
)
8178 inst
.error
= _("constant expression expected");
8182 if (inst
.relocs
[0].exp
.X_op
== O_constant
8183 || inst
.relocs
[0].exp
.X_op
== O_big
)
8185 #if defined BFD_HOST_64_BIT
8190 if (inst
.relocs
[0].exp
.X_op
== O_big
)
8192 LITTLENUM_TYPE w
[X_PRECISION
];
8195 if (inst
.relocs
[0].exp
.X_add_number
== -1)
8197 gen_to_words (w
, X_PRECISION
, E_PRECISION
);
8199 /* FIXME: Should we check words w[2..5] ? */
8204 #if defined BFD_HOST_64_BIT
8206 ((((((((bfd_int64_t
) l
[3] & LITTLENUM_MASK
)
8207 << LITTLENUM_NUMBER_OF_BITS
)
8208 | ((bfd_int64_t
) l
[2] & LITTLENUM_MASK
))
8209 << LITTLENUM_NUMBER_OF_BITS
)
8210 | ((bfd_int64_t
) l
[1] & LITTLENUM_MASK
))
8211 << LITTLENUM_NUMBER_OF_BITS
)
8212 | ((bfd_int64_t
) l
[0] & LITTLENUM_MASK
));
8214 v
= ((l
[1] & LITTLENUM_MASK
) << LITTLENUM_NUMBER_OF_BITS
)
8215 | (l
[0] & LITTLENUM_MASK
);
8219 v
= inst
.relocs
[0].exp
.X_add_number
;
8221 if (!inst
.operands
[i
].issingle
)
8225 /* LDR should not use lead in a flag-setting instruction being
8226 chosen so we do not check whether movs can be used. */
8228 if ((ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
8229 || ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8230 && inst
.operands
[i
].reg
!= 13
8231 && inst
.operands
[i
].reg
!= 15)
8233 /* Check if on thumb2 it can be done with a mov.w, mvn or
8234 movw instruction. */
8235 unsigned int newimm
;
8236 bfd_boolean isNegated
;
8238 newimm
= encode_thumb32_immediate (v
);
8239 if (newimm
!= (unsigned int) FAIL
)
8243 newimm
= encode_thumb32_immediate (~v
);
8244 if (newimm
!= (unsigned int) FAIL
)
8248 /* The number can be loaded with a mov.w or mvn
8250 if (newimm
!= (unsigned int) FAIL
8251 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
8253 inst
.instruction
= (0xf04f0000 /* MOV.W. */
8254 | (inst
.operands
[i
].reg
<< 8));
8255 /* Change to MOVN. */
8256 inst
.instruction
|= (isNegated
? 0x200000 : 0);
8257 inst
.instruction
|= (newimm
& 0x800) << 15;
8258 inst
.instruction
|= (newimm
& 0x700) << 4;
8259 inst
.instruction
|= (newimm
& 0x0ff);
8262 /* The number can be loaded with a movw instruction. */
8263 else if ((v
& ~0xFFFF) == 0
8264 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
))
8266 int imm
= v
& 0xFFFF;
8268 inst
.instruction
= 0xf2400000; /* MOVW. */
8269 inst
.instruction
|= (inst
.operands
[i
].reg
<< 8);
8270 inst
.instruction
|= (imm
& 0xf000) << 4;
8271 inst
.instruction
|= (imm
& 0x0800) << 15;
8272 inst
.instruction
|= (imm
& 0x0700) << 4;
8273 inst
.instruction
|= (imm
& 0x00ff);
8280 int value
= encode_arm_immediate (v
);
8284 /* This can be done with a mov instruction. */
8285 inst
.instruction
&= LITERAL_MASK
;
8286 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MOV
<< DATA_OP_SHIFT
);
8287 inst
.instruction
|= value
& 0xfff;
8291 value
= encode_arm_immediate (~ v
);
8294 /* This can be done with a mvn instruction. */
8295 inst
.instruction
&= LITERAL_MASK
;
8296 inst
.instruction
|= INST_IMMEDIATE
| (OPCODE_MVN
<< DATA_OP_SHIFT
);
8297 inst
.instruction
|= value
& 0xfff;
8301 else if (t
== CONST_VEC
&& ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
))
8304 unsigned immbits
= 0;
8305 unsigned immlo
= inst
.operands
[1].imm
;
8306 unsigned immhi
= inst
.operands
[1].regisimm
8307 ? inst
.operands
[1].reg
8308 : inst
.relocs
[0].exp
.X_unsigned
8310 : ((bfd_int64_t
)((int) immlo
)) >> 32;
8311 int cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8312 &op
, 64, NT_invtype
);
8316 neon_invert_size (&immlo
, &immhi
, 64);
8318 cmode
= neon_cmode_for_move_imm (immlo
, immhi
, FALSE
, &immbits
,
8319 &op
, 64, NT_invtype
);
8324 inst
.instruction
= (inst
.instruction
& VLDR_VMOV_SAME
)
8330 /* Fill other bits in vmov encoding for both thumb and arm. */
8332 inst
.instruction
|= (0x7U
<< 29) | (0xF << 24);
8334 inst
.instruction
|= (0xFU
<< 28) | (0x1 << 25);
8335 neon_write_immbits (immbits
);
8343 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8344 if (inst
.operands
[i
].issingle
8345 && is_quarter_float (inst
.operands
[1].imm
)
8346 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3xd
))
8348 inst
.operands
[1].imm
=
8349 neon_qfloat_bits (v
);
8350 do_vfp_nsyn_opcode ("fconsts");
8354 /* If our host does not support a 64-bit type then we cannot perform
8355 the following optimization. This mean that there will be a
8356 discrepancy between the output produced by an assembler built for
8357 a 32-bit-only host and the output produced from a 64-bit host, but
8358 this cannot be helped. */
8359 #if defined BFD_HOST_64_BIT
8360 else if (!inst
.operands
[1].issingle
8361 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v3
))
8363 if (is_double_a_single (v
)
8364 && is_quarter_float (double_to_single (v
)))
8366 inst
.operands
[1].imm
=
8367 neon_qfloat_bits (double_to_single (v
));
8368 do_vfp_nsyn_opcode ("fconstd");
8376 if (add_to_lit_pool ((!inst
.operands
[i
].isvec
8377 || inst
.operands
[i
].issingle
) ? 4 : 8) == FAIL
)
8380 inst
.operands
[1].reg
= REG_PC
;
8381 inst
.operands
[1].isreg
= 1;
8382 inst
.operands
[1].preind
= 1;
8383 inst
.relocs
[0].pc_rel
= 1;
8384 inst
.relocs
[0].type
= (thumb_p
8385 ? BFD_RELOC_ARM_THUMB_OFFSET
8387 ? BFD_RELOC_ARM_HWLITERAL
8388 : BFD_RELOC_ARM_LITERAL
));
8392 /* inst.operands[i] was set up by parse_address. Encode it into an
8393 ARM-format instruction. Reject all forms which cannot be encoded
8394 into a coprocessor load/store instruction. If wb_ok is false,
8395 reject use of writeback; if unind_ok is false, reject use of
8396 unindexed addressing. If reloc_override is not 0, use it instead
8397 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8398 (in which case it is preserved). */
8401 encode_arm_cp_address (int i
, int wb_ok
, int unind_ok
, int reloc_override
)
8403 if (!inst
.operands
[i
].isreg
)
8406 if (! inst
.operands
[0].isvec
)
8408 inst
.error
= _("invalid co-processor operand");
8411 if (move_or_literal_pool (0, CONST_VEC
, /*mode_3=*/FALSE
))
8415 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
8417 gas_assert (!(inst
.operands
[i
].preind
&& inst
.operands
[i
].postind
));
8419 if (!inst
.operands
[i
].preind
&& !inst
.operands
[i
].postind
) /* unindexed */
8421 gas_assert (!inst
.operands
[i
].writeback
);
8424 inst
.error
= _("instruction does not support unindexed addressing");
8427 inst
.instruction
|= inst
.operands
[i
].imm
;
8428 inst
.instruction
|= INDEX_UP
;
8432 if (inst
.operands
[i
].preind
)
8433 inst
.instruction
|= PRE_INDEX
;
8435 if (inst
.operands
[i
].writeback
)
8437 if (inst
.operands
[i
].reg
== REG_PC
)
8439 inst
.error
= _("pc may not be used with write-back");
8444 inst
.error
= _("instruction does not support writeback");
8447 inst
.instruction
|= WRITE_BACK
;
8451 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) reloc_override
;
8452 else if ((inst
.relocs
[0].type
< BFD_RELOC_ARM_ALU_PC_G0_NC
8453 || inst
.relocs
[0].type
> BFD_RELOC_ARM_LDC_SB_G2
)
8454 && inst
.relocs
[0].type
!= BFD_RELOC_ARM_LDR_PC_G0
)
8457 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_CP_OFF_IMM
;
8459 inst
.relocs
[0].type
= BFD_RELOC_ARM_CP_OFF_IMM
;
8462 /* Prefer + for zero encoded value. */
8463 if (!inst
.operands
[i
].negative
)
8464 inst
.instruction
|= INDEX_UP
;
8469 /* Functions for instruction encoding, sorted by sub-architecture.
8470 First some generics; their names are taken from the conventional
8471 bit positions for register arguments in ARM format instructions. */
8481 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8487 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8493 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8494 inst
.instruction
|= inst
.operands
[1].reg
;
8500 inst
.instruction
|= inst
.operands
[0].reg
;
8501 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8507 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8508 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8514 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8515 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8521 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8522 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8526 check_obsolete (const arm_feature_set
*feature
, const char *msg
)
8528 if (ARM_CPU_IS_ANY (cpu_variant
))
8530 as_tsktsk ("%s", msg
);
8533 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, *feature
))
8545 unsigned Rn
= inst
.operands
[2].reg
;
8546 /* Enforce restrictions on SWP instruction. */
8547 if ((inst
.instruction
& 0x0fbfffff) == 0x01000090)
8549 constraint (Rn
== inst
.operands
[0].reg
|| Rn
== inst
.operands
[1].reg
,
8550 _("Rn must not overlap other operands"));
8552 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8554 if (!check_obsolete (&arm_ext_v8
,
8555 _("swp{b} use is obsoleted for ARMv8 and later"))
8556 && warn_on_deprecated
8557 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
))
8558 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8561 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8562 inst
.instruction
|= inst
.operands
[1].reg
;
8563 inst
.instruction
|= Rn
<< 16;
8569 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8570 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8571 inst
.instruction
|= inst
.operands
[2].reg
;
8577 constraint ((inst
.operands
[2].reg
== REG_PC
), BAD_PC
);
8578 constraint (((inst
.relocs
[0].exp
.X_op
!= O_constant
8579 && inst
.relocs
[0].exp
.X_op
!= O_illegal
)
8580 || inst
.relocs
[0].exp
.X_add_number
!= 0),
8582 inst
.instruction
|= inst
.operands
[0].reg
;
8583 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
8584 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
8590 inst
.instruction
|= inst
.operands
[0].imm
;
8596 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8597 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
8600 /* ARM instructions, in alphabetical order by function name (except
8601 that wrapper functions appear immediately after the function they
8604 /* This is a pseudo-op of the form "adr rd, label" to be converted
8605 into a relative address of the form "add rd, pc, #label-.-8". */
8610 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8612 /* Frag hacking will turn this into a sub instruction if the offset turns
8613 out to be negative. */
8614 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
8615 inst
.relocs
[0].pc_rel
= 1;
8616 inst
.relocs
[0].exp
.X_add_number
-= 8;
8618 if (support_interwork
8619 && inst
.relocs
[0].exp
.X_op
== O_symbol
8620 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8621 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8622 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8623 inst
.relocs
[0].exp
.X_add_number
|= 1;
8626 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8627 into a relative address of the form:
8628 add rd, pc, #low(label-.-8)"
8629 add rd, rd, #high(label-.-8)" */
8634 inst
.instruction
|= (inst
.operands
[0].reg
<< 12); /* Rd */
8636 /* Frag hacking will turn this into a sub instruction if the offset turns
8637 out to be negative. */
8638 inst
.relocs
[0].type
= BFD_RELOC_ARM_ADRL_IMMEDIATE
;
8639 inst
.relocs
[0].pc_rel
= 1;
8640 inst
.size
= INSN_SIZE
* 2;
8641 inst
.relocs
[0].exp
.X_add_number
-= 8;
8643 if (support_interwork
8644 && inst
.relocs
[0].exp
.X_op
== O_symbol
8645 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
8646 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
8647 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
8648 inst
.relocs
[0].exp
.X_add_number
|= 1;
8654 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8655 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
8657 if (!inst
.operands
[1].present
)
8658 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
8659 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8660 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
8661 encode_arm_shifter_operand (2);
8667 if (inst
.operands
[0].present
)
8668 inst
.instruction
|= inst
.operands
[0].imm
;
8670 inst
.instruction
|= 0xf;
8676 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
8677 constraint (msb
> 32, _("bit-field extends past end of register"));
8678 /* The instruction encoding stores the LSB and MSB,
8679 not the LSB and width. */
8680 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8681 inst
.instruction
|= inst
.operands
[1].imm
<< 7;
8682 inst
.instruction
|= (msb
- 1) << 16;
8690 /* #0 in second position is alternative syntax for bfc, which is
8691 the same instruction but with REG_PC in the Rm field. */
8692 if (!inst
.operands
[1].isreg
)
8693 inst
.operands
[1].reg
= REG_PC
;
8695 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
8696 constraint (msb
> 32, _("bit-field extends past end of register"));
8697 /* The instruction encoding stores the LSB and MSB,
8698 not the LSB and width. */
8699 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8700 inst
.instruction
|= inst
.operands
[1].reg
;
8701 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8702 inst
.instruction
|= (msb
- 1) << 16;
8708 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
8709 _("bit-field extends past end of register"));
8710 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
8711 inst
.instruction
|= inst
.operands
[1].reg
;
8712 inst
.instruction
|= inst
.operands
[2].imm
<< 7;
8713 inst
.instruction
|= (inst
.operands
[3].imm
- 1) << 16;
8716 /* ARM V5 breakpoint instruction (argument parse)
8717 BKPT <16 bit unsigned immediate>
8718 Instruction is not conditional.
8719 The bit pattern given in insns[] has the COND_ALWAYS condition,
8720 and it is an error if the caller tried to override that. */
8725 /* Top 12 of 16 bits to bits 19:8. */
8726 inst
.instruction
|= (inst
.operands
[0].imm
& 0xfff0) << 4;
8728 /* Bottom 4 of 16 bits to bits 3:0. */
8729 inst
.instruction
|= inst
.operands
[0].imm
& 0xf;
8733 encode_branch (int default_reloc
)
8735 if (inst
.operands
[0].hasreloc
)
8737 constraint (inst
.operands
[0].imm
!= BFD_RELOC_ARM_PLT32
8738 && inst
.operands
[0].imm
!= BFD_RELOC_ARM_TLS_CALL
,
8739 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8740 inst
.relocs
[0].type
= inst
.operands
[0].imm
== BFD_RELOC_ARM_PLT32
8741 ? BFD_RELOC_ARM_PLT32
8742 : thumb_mode
? BFD_RELOC_ARM_THM_TLS_CALL
: BFD_RELOC_ARM_TLS_CALL
;
8745 inst
.relocs
[0].type
= (bfd_reloc_code_real_type
) default_reloc
;
8746 inst
.relocs
[0].pc_rel
= 1;
8753 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8754 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8757 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8764 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
8766 if (inst
.cond
== COND_ALWAYS
)
8767 encode_branch (BFD_RELOC_ARM_PCREL_CALL
);
8769 encode_branch (BFD_RELOC_ARM_PCREL_JUMP
);
8773 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH
);
8776 /* ARM V5 branch-link-exchange instruction (argument parse)
8777 BLX <target_addr> ie BLX(1)
8778 BLX{<condition>} <Rm> ie BLX(2)
8779 Unfortunately, there are two different opcodes for this mnemonic.
8780 So, the insns[].value is not used, and the code here zaps values
8781 into inst.instruction.
8782 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8787 if (inst
.operands
[0].isreg
)
8789 /* Arg is a register; the opcode provided by insns[] is correct.
8790 It is not illegal to do "blx pc", just useless. */
8791 if (inst
.operands
[0].reg
== REG_PC
)
8792 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8794 inst
.instruction
|= inst
.operands
[0].reg
;
8798 /* Arg is an address; this instruction cannot be executed
8799 conditionally, and the opcode must be adjusted.
8800 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8801 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8802 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
8803 inst
.instruction
= 0xfa000000;
8804 encode_branch (BFD_RELOC_ARM_PCREL_BLX
);
8811 bfd_boolean want_reloc
;
8813 if (inst
.operands
[0].reg
== REG_PC
)
8814 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8816 inst
.instruction
|= inst
.operands
[0].reg
;
8817 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8818 it is for ARMv4t or earlier. */
8819 want_reloc
= !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5
);
8820 if (!ARM_FEATURE_ZERO (selected_object_arch
)
8821 && !ARM_CPU_HAS_FEATURE (selected_object_arch
, arm_ext_v5
))
8825 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
8830 inst
.relocs
[0].type
= BFD_RELOC_ARM_V4BX
;
8834 /* ARM v5TEJ. Jump to Jazelle code. */
8839 if (inst
.operands
[0].reg
== REG_PC
)
8840 as_tsktsk (_("use of r15 in bxj is not really useful"));
8842 inst
.instruction
|= inst
.operands
[0].reg
;
8845 /* Co-processor data operation:
8846 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8847 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8851 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8852 inst
.instruction
|= inst
.operands
[1].imm
<< 20;
8853 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
8854 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8855 inst
.instruction
|= inst
.operands
[4].reg
;
8856 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8862 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
8863 encode_arm_shifter_operand (1);
8866 /* Transfer between coprocessor and ARM registers.
8867 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8872 No special properties. */
8874 struct deprecated_coproc_regs_s
8881 arm_feature_set deprecated
;
8882 arm_feature_set obsoleted
;
8883 const char *dep_msg
;
8884 const char *obs_msg
;
8887 #define DEPR_ACCESS_V8 \
8888 N_("This coprocessor register access is deprecated in ARMv8")
8890 /* Table of all deprecated coprocessor registers. */
8891 static struct deprecated_coproc_regs_s deprecated_coproc_regs
[] =
8893 {15, 0, 7, 10, 5, /* CP15DMB. */
8894 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8895 DEPR_ACCESS_V8
, NULL
},
8896 {15, 0, 7, 10, 4, /* CP15DSB. */
8897 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8898 DEPR_ACCESS_V8
, NULL
},
8899 {15, 0, 7, 5, 4, /* CP15ISB. */
8900 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8901 DEPR_ACCESS_V8
, NULL
},
8902 {14, 6, 1, 0, 0, /* TEEHBR. */
8903 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8904 DEPR_ACCESS_V8
, NULL
},
8905 {14, 6, 0, 0, 0, /* TEECR. */
8906 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
), ARM_ARCH_NONE
,
8907 DEPR_ACCESS_V8
, NULL
},
8910 #undef DEPR_ACCESS_V8
/* Number of entries in the deprecated_coproc_regs table.  Kept in sync
   automatically via sizeof, so new table entries need no further edits.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8921 Rd
= inst
.operands
[2].reg
;
8924 if (inst
.instruction
== 0xee000010
8925 || inst
.instruction
== 0xfe000010)
8927 reject_bad_reg (Rd
);
8928 else if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
8930 constraint (Rd
== REG_SP
, BAD_SP
);
8935 if (inst
.instruction
== 0xe000010)
8936 constraint (Rd
== REG_PC
, BAD_PC
);
8939 for (i
= 0; i
< deprecated_coproc_reg_count
; ++i
)
8941 const struct deprecated_coproc_regs_s
*r
=
8942 deprecated_coproc_regs
+ i
;
8944 if (inst
.operands
[0].reg
== r
->cp
8945 && inst
.operands
[1].imm
== r
->opc1
8946 && inst
.operands
[3].reg
== r
->crn
8947 && inst
.operands
[4].reg
== r
->crm
8948 && inst
.operands
[5].imm
== r
->opc2
)
8950 if (! ARM_CPU_IS_ANY (cpu_variant
)
8951 && warn_on_deprecated
8952 && ARM_CPU_HAS_FEATURE (cpu_variant
, r
->deprecated
))
8953 as_tsktsk ("%s", r
->dep_msg
);
8957 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
8958 inst
.instruction
|= inst
.operands
[1].imm
<< 21;
8959 inst
.instruction
|= Rd
<< 12;
8960 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
8961 inst
.instruction
|= inst
.operands
[4].reg
;
8962 inst
.instruction
|= inst
.operands
[5].imm
<< 5;
8965 /* Transfer between coprocessor register and pair of ARM registers.
8966 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8971 Two XScale instructions are special cases of these:
8973 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8974 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8976 Result unpredictable if Rd or Rn is R15. */
8983 Rd
= inst
.operands
[2].reg
;
8984 Rn
= inst
.operands
[3].reg
;
8988 reject_bad_reg (Rd
);
8989 reject_bad_reg (Rn
);
8993 constraint (Rd
== REG_PC
, BAD_PC
);
8994 constraint (Rn
== REG_PC
, BAD_PC
);
8997 /* Only check the MRRC{2} variants. */
8998 if ((inst
.instruction
& 0x0FF00000) == 0x0C500000)
9000 /* If Rd == Rn, error that the operation is
9001 unpredictable (example MRRC p3,#1,r1,r1,c4). */
9002 constraint (Rd
== Rn
, BAD_OVERLAP
);
9005 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9006 inst
.instruction
|= inst
.operands
[1].imm
<< 4;
9007 inst
.instruction
|= Rd
<< 12;
9008 inst
.instruction
|= Rn
<< 16;
9009 inst
.instruction
|= inst
.operands
[4].reg
;
9015 inst
.instruction
|= inst
.operands
[0].imm
<< 6;
9016 if (inst
.operands
[1].present
)
9018 inst
.instruction
|= CPSI_MMOD
;
9019 inst
.instruction
|= inst
.operands
[1].imm
;
9026 inst
.instruction
|= inst
.operands
[0].imm
;
9032 unsigned Rd
, Rn
, Rm
;
9034 Rd
= inst
.operands
[0].reg
;
9035 Rn
= (inst
.operands
[1].present
9036 ? inst
.operands
[1].reg
: Rd
);
9037 Rm
= inst
.operands
[2].reg
;
9039 constraint ((Rd
== REG_PC
), BAD_PC
);
9040 constraint ((Rn
== REG_PC
), BAD_PC
);
9041 constraint ((Rm
== REG_PC
), BAD_PC
);
9043 inst
.instruction
|= Rd
<< 16;
9044 inst
.instruction
|= Rn
<< 0;
9045 inst
.instruction
|= Rm
<< 8;
9051 /* There is no IT instruction in ARM mode. We
9052 process it to do the validation as if in
9053 thumb mode, just in case the code gets
9054 assembled for thumb using the unified syntax. */
9059 set_it_insn_type (IT_INSN
);
9060 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
9061 now_it
.cc
= inst
.operands
[0].imm
;
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  int i;

  /* The list denotes a single register exactly when it equals one of the
     sixteen one-bit masks.  Scanning explicitly is portable ISO C (the
     previous ffs()-based form was POSIX-only) and, unlike "ffs (range) - 1",
     it cannot evaluate the undefined expression 1 << -1 when the register
     list is empty (range == 0).  */
  for (i = 0; i <= 15; i++)
    if (range == (1 << i))
      return i;

  return -1;
}
9075 encode_ldmstm(int from_push_pop_mnem
)
9077 int base_reg
= inst
.operands
[0].reg
;
9078 int range
= inst
.operands
[1].imm
;
9081 inst
.instruction
|= base_reg
<< 16;
9082 inst
.instruction
|= range
;
9084 if (inst
.operands
[1].writeback
)
9085 inst
.instruction
|= LDM_TYPE_2_OR_3
;
9087 if (inst
.operands
[0].writeback
)
9089 inst
.instruction
|= WRITE_BACK
;
9090 /* Check for unpredictable uses of writeback. */
9091 if (inst
.instruction
& LOAD_BIT
)
9093 /* Not allowed in LDM type 2. */
9094 if ((inst
.instruction
& LDM_TYPE_2_OR_3
)
9095 && ((range
& (1 << REG_PC
)) == 0))
9096 as_warn (_("writeback of base register is UNPREDICTABLE"));
9097 /* Only allowed if base reg not in list for other types. */
9098 else if (range
& (1 << base_reg
))
9099 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
9103 /* Not allowed for type 2. */
9104 if (inst
.instruction
& LDM_TYPE_2_OR_3
)
9105 as_warn (_("writeback of base register is UNPREDICTABLE"));
9106 /* Only allowed if base reg not in list, or first in list. */
9107 else if ((range
& (1 << base_reg
))
9108 && (range
& ((1 << base_reg
) - 1)))
9109 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
9113 /* If PUSH/POP has only one register, then use the A2 encoding. */
9114 one_reg
= only_one_reg_in_list (range
);
9115 if (from_push_pop_mnem
&& one_reg
>= 0)
9117 int is_push
= (inst
.instruction
& A_PUSH_POP_OP_MASK
) == A1_OPCODE_PUSH
;
9119 if (is_push
&& one_reg
== 13 /* SP */)
9120 /* PR 22483: The A2 encoding cannot be used when
9121 pushing the stack pointer as this is UNPREDICTABLE. */
9124 inst
.instruction
&= A_COND_MASK
;
9125 inst
.instruction
|= is_push
? A2_OPCODE_PUSH
: A2_OPCODE_POP
;
9126 inst
.instruction
|= one_reg
<< 12;
9133 encode_ldmstm (/*from_push_pop_mnem=*/FALSE
);
9136 /* ARMv5TE load-consecutive (argument parse)
9145 constraint (inst
.operands
[0].reg
% 2 != 0,
9146 _("first transfer register must be even"));
9147 constraint (inst
.operands
[1].present
9148 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9149 _("can only transfer two consecutive registers"));
9150 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9151 constraint (!inst
.operands
[2].isreg
, _("'[' expected"));
9153 if (!inst
.operands
[1].present
)
9154 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
9156 /* encode_arm_addr_mode_3 will diagnose overlap between the base
9157 register and the first register written; we have to diagnose
9158 overlap between the base and the second register written here. */
9160 if (inst
.operands
[2].reg
== inst
.operands
[1].reg
9161 && (inst
.operands
[2].writeback
|| inst
.operands
[2].postind
))
9162 as_warn (_("base register written back, and overlaps "
9163 "second transfer register"));
9165 if (!(inst
.instruction
& V4_STR_BIT
))
9167 /* For an index-register load, the index register must not overlap the
9168 destination (even if not write-back). */
9169 if (inst
.operands
[2].immisreg
9170 && ((unsigned) inst
.operands
[2].imm
== inst
.operands
[0].reg
9171 || (unsigned) inst
.operands
[2].imm
== inst
.operands
[1].reg
))
9172 as_warn (_("index register overlaps transfer register"));
9174 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9175 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE
);
9181 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
9182 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
9183 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
9184 || inst
.operands
[1].negative
9185 /* This can arise if the programmer has written
9187 or if they have mistakenly used a register name as the last
9190 It is very difficult to distinguish between these two cases
9191 because "rX" might actually be a label. ie the register
9192 name has been occluded by a symbol of the same name. So we
9193 just generate a general 'bad addressing mode' type error
9194 message and leave it up to the programmer to discover the
9195 true cause and fix their mistake. */
9196 || (inst
.operands
[1].reg
== REG_PC
),
9199 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9200 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9201 _("offset must be zero in ARM encoding"));
9203 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
9205 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9206 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9207 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9213 constraint (inst
.operands
[0].reg
% 2 != 0,
9214 _("even register required"));
9215 constraint (inst
.operands
[1].present
9216 && inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
9217 _("can only load two consecutive registers"));
9218 /* If op 1 were present and equal to PC, this function wouldn't
9219 have been called in the first place. */
9220 constraint (inst
.operands
[0].reg
== REG_LR
, _("r14 not allowed here"));
9222 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9223 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9226 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9227 which is not a multiple of four is UNPREDICTABLE. */
9229 check_ldr_r15_aligned (void)
9231 constraint (!(inst
.operands
[1].immisreg
)
9232 && (inst
.operands
[0].reg
== REG_PC
9233 && inst
.operands
[1].reg
== REG_PC
9234 && (inst
.relocs
[0].exp
.X_add_number
& 0x3)),
9235 _("ldr to register 15 must be 4-byte aligned"));
9241 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9242 if (!inst
.operands
[1].isreg
)
9243 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/FALSE
))
9245 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE
);
9246 check_ldr_r15_aligned ();
9252 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9254 if (inst
.operands
[1].preind
)
9256 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9257 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9258 _("this instruction requires a post-indexed address"));
9260 inst
.operands
[1].preind
= 0;
9261 inst
.operands
[1].postind
= 1;
9262 inst
.operands
[1].writeback
= 1;
9264 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9265 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE
);
9268 /* Halfword and signed-byte load/store operations. */
9273 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9274 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9275 if (!inst
.operands
[1].isreg
)
9276 if (move_or_literal_pool (0, CONST_ARM
, /*mode_3=*/TRUE
))
9278 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE
);
9284 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9286 if (inst
.operands
[1].preind
)
9288 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9289 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9290 _("this instruction requires a post-indexed address"));
9292 inst
.operands
[1].preind
= 0;
9293 inst
.operands
[1].postind
= 1;
9294 inst
.operands
[1].writeback
= 1;
9296 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9297 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE
);
9300 /* Co-processor register load/store.
9301 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9305 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
9306 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
9307 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
9313 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9314 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9315 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
)
9316 && !(inst
.instruction
& 0x00400000))
9317 as_tsktsk (_("Rd and Rm should be different in mla"));
9319 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9320 inst
.instruction
|= inst
.operands
[1].reg
;
9321 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9322 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9328 constraint (inst
.relocs
[0].type
>= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9329 && inst
.relocs
[0].type
<= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
,
9331 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9332 encode_arm_shifter_operand (1);
9335 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9342 top
= (inst
.instruction
& 0x00400000) != 0;
9343 constraint (top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
,
9344 _(":lower16: not allowed in this instruction"));
9345 constraint (!top
&& inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
,
9346 _(":upper16: not allowed in this instruction"));
9347 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9348 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
9350 imm
= inst
.relocs
[0].exp
.X_add_number
;
9351 /* The value is in two pieces: 0:11, 16:19. */
9352 inst
.instruction
|= (imm
& 0x00000fff);
9353 inst
.instruction
|= (imm
& 0x0000f000) << 4;
9358 do_vfp_nsyn_mrs (void)
9360 if (inst
.operands
[0].isvec
)
9362 if (inst
.operands
[1].reg
!= 1)
9363 first_error (_("operand 1 must be FPSCR"));
9364 memset (&inst
.operands
[0], '\0', sizeof (inst
.operands
[0]));
9365 memset (&inst
.operands
[1], '\0', sizeof (inst
.operands
[1]));
9366 do_vfp_nsyn_opcode ("fmstat");
9368 else if (inst
.operands
[1].isvec
)
9369 do_vfp_nsyn_opcode ("fmrx");
9377 do_vfp_nsyn_msr (void)
9379 if (inst
.operands
[0].isvec
)
9380 do_vfp_nsyn_opcode ("fmxr");
9390 unsigned Rt
= inst
.operands
[0].reg
;
9392 if (thumb_mode
&& Rt
== REG_SP
)
9394 inst
.error
= BAD_SP
;
9398 /* MVFR2 is only valid at ARMv8-A. */
9399 if (inst
.operands
[1].reg
== 5)
9400 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9403 /* APSR_ sets isvec. All other refs to PC are illegal. */
9404 if (!inst
.operands
[0].isvec
&& Rt
== REG_PC
)
9406 inst
.error
= BAD_PC
;
9410 /* If we get through parsing the register name, we just insert the number
9411 generated into the instruction without further validation. */
9412 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
9413 inst
.instruction
|= (Rt
<< 12);
9419 unsigned Rt
= inst
.operands
[1].reg
;
9422 reject_bad_reg (Rt
);
9423 else if (Rt
== REG_PC
)
9425 inst
.error
= BAD_PC
;
9429 /* MVFR2 is only valid for ARMv8-A. */
9430 if (inst
.operands
[0].reg
== 5)
9431 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
9434 /* If we get through parsing the register name, we just insert the number
9435 generated into the instruction without further validation. */
9436 inst
.instruction
|= (inst
.operands
[0].reg
<< 16);
9437 inst
.instruction
|= (Rt
<< 12);
9445 if (do_vfp_nsyn_mrs () == SUCCESS
)
9448 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
9449 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9451 if (inst
.operands
[1].isreg
)
9453 br
= inst
.operands
[1].reg
;
9454 if (((br
& 0x200) == 0) && ((br
& 0xf0000) != 0xf0000))
9455 as_bad (_("bad register for mrs"));
9459 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9460 constraint ((inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
))
9462 _("'APSR', 'CPSR' or 'SPSR' expected"));
9463 br
= (15<<16) | (inst
.operands
[1].imm
& SPSR_BIT
);
9466 inst
.instruction
|= br
;
9469 /* Two possible forms:
9470 "{C|S}PSR_<field>, Rm",
9471 "{C|S}PSR_f, #expression". */
9476 if (do_vfp_nsyn_msr () == SUCCESS
)
9479 inst
.instruction
|= inst
.operands
[0].imm
;
9480 if (inst
.operands
[1].isreg
)
9481 inst
.instruction
|= inst
.operands
[1].reg
;
9484 inst
.instruction
|= INST_IMMEDIATE
;
9485 inst
.relocs
[0].type
= BFD_RELOC_ARM_IMMEDIATE
;
9486 inst
.relocs
[0].pc_rel
= 0;
9493 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
9495 if (!inst
.operands
[2].present
)
9496 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
9497 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9498 inst
.instruction
|= inst
.operands
[1].reg
;
9499 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9501 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
9502 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9503 as_tsktsk (_("Rd and Rm should be different in mul"));
9506 /* Long Multiply Parser
9507 UMULL RdLo, RdHi, Rm, Rs
9508 SMULL RdLo, RdHi, Rm, Rs
9509 UMLAL RdLo, RdHi, Rm, Rs
9510 SMLAL RdLo, RdHi, Rm, Rs. */
9515 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9516 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9517 inst
.instruction
|= inst
.operands
[2].reg
;
9518 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9520 /* rdhi and rdlo must be different. */
9521 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9522 as_tsktsk (_("rdhi and rdlo must be different"));
9524 /* rdhi, rdlo and rm must all be different before armv6. */
9525 if ((inst
.operands
[0].reg
== inst
.operands
[2].reg
9526 || inst
.operands
[1].reg
== inst
.operands
[2].reg
)
9527 && !ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6
))
9528 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9534 if (inst
.operands
[0].present
9535 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6k
))
9537 /* Architectural NOP hints are CPSR sets with no bits selected. */
9538 inst
.instruction
&= 0xf0000000;
9539 inst
.instruction
|= 0x0320f000;
9540 if (inst
.operands
[0].present
)
9541 inst
.instruction
|= inst
.operands
[0].imm
;
9545 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9546 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9547 Condition defaults to COND_ALWAYS.
9548 Error if Rd, Rn or Rm are R15. */
9553 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9554 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9555 inst
.instruction
|= inst
.operands
[2].reg
;
9556 if (inst
.operands
[3].present
)
9557 encode_arm_shift (3);
9560 /* ARM V6 PKHTB (Argument Parse). */
9565 if (!inst
.operands
[3].present
)
9567 /* If the shift specifier is omitted, turn the instruction
9568 into pkhbt rd, rm, rn. */
9569 inst
.instruction
&= 0xfff00010;
9570 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9571 inst
.instruction
|= inst
.operands
[1].reg
;
9572 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9576 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9577 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9578 inst
.instruction
|= inst
.operands
[2].reg
;
9579 encode_arm_shift (3);
9583 /* ARMv5TE: Preload-Cache
9584 MP Extensions: Preload for write
9588 Syntactically, like LDR with B=1, W=0, L=1. */
9593 constraint (!inst
.operands
[0].isreg
,
9594 _("'[' expected after PLD mnemonic"));
9595 constraint (inst
.operands
[0].postind
,
9596 _("post-indexed expression used in preload instruction"));
9597 constraint (inst
.operands
[0].writeback
,
9598 _("writeback used in preload instruction"));
9599 constraint (!inst
.operands
[0].preind
,
9600 _("unindexed addressing used in preload instruction"));
9601 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9604 /* ARMv7: PLI <addr_mode> */
9608 constraint (!inst
.operands
[0].isreg
,
9609 _("'[' expected after PLI mnemonic"));
9610 constraint (inst
.operands
[0].postind
,
9611 _("post-indexed expression used in preload instruction"));
9612 constraint (inst
.operands
[0].writeback
,
9613 _("writeback used in preload instruction"));
9614 constraint (!inst
.operands
[0].preind
,
9615 _("unindexed addressing used in preload instruction"));
9616 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE
);
9617 inst
.instruction
&= ~PRE_INDEX
;
9623 constraint (inst
.operands
[0].writeback
,
9624 _("push/pop do not support {reglist}^"));
9625 inst
.operands
[1] = inst
.operands
[0];
9626 memset (&inst
.operands
[0], 0, sizeof inst
.operands
[0]);
9627 inst
.operands
[0].isreg
= 1;
9628 inst
.operands
[0].writeback
= 1;
9629 inst
.operands
[0].reg
= REG_SP
;
9630 encode_ldmstm (/*from_push_pop_mnem=*/TRUE
);
9633 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9634 word at the specified address and the following word
9636 Unconditionally executed.
9637 Error if Rn is R15. */
9642 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9643 if (inst
.operands
[0].writeback
)
9644 inst
.instruction
|= WRITE_BACK
;
9647 /* ARM V6 ssat (argument parse). */
9652 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9653 inst
.instruction
|= (inst
.operands
[1].imm
- 1) << 16;
9654 inst
.instruction
|= inst
.operands
[2].reg
;
9656 if (inst
.operands
[3].present
)
9657 encode_arm_shift (3);
9660 /* ARM V6 usat (argument parse). */
9665 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9666 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9667 inst
.instruction
|= inst
.operands
[2].reg
;
9669 if (inst
.operands
[3].present
)
9670 encode_arm_shift (3);
9673 /* ARM V6 ssat16 (argument parse). */
9678 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9679 inst
.instruction
|= ((inst
.operands
[1].imm
- 1) << 16);
9680 inst
.instruction
|= inst
.operands
[2].reg
;
9686 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9687 inst
.instruction
|= inst
.operands
[1].imm
<< 16;
9688 inst
.instruction
|= inst
.operands
[2].reg
;
9691 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9692 preserving the other bits.
9694 setend <endian_specifier>, where <endian_specifier> is either
9700 if (warn_on_deprecated
9701 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
9702 as_tsktsk (_("setend use is deprecated for ARMv8"));
9704 if (inst
.operands
[0].imm
)
9705 inst
.instruction
|= 0x200;
9711 unsigned int Rm
= (inst
.operands
[1].present
9712 ? inst
.operands
[1].reg
9713 : inst
.operands
[0].reg
);
9715 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9716 inst
.instruction
|= Rm
;
9717 if (inst
.operands
[2].isreg
) /* Rd, {Rm,} Rs */
9719 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9720 inst
.instruction
|= SHIFT_BY_REG
;
9721 /* PR 12854: Error on extraneous shifts. */
9722 constraint (inst
.operands
[2].shifted
,
9723 _("extraneous shift as part of operand to shift insn"));
9726 inst
.relocs
[0].type
= BFD_RELOC_ARM_SHIFT_IMM
;
9732 inst
.relocs
[0].type
= BFD_RELOC_ARM_SMC
;
9733 inst
.relocs
[0].pc_rel
= 0;
9739 inst
.relocs
[0].type
= BFD_RELOC_ARM_HVC
;
9740 inst
.relocs
[0].pc_rel
= 0;
9746 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
9747 inst
.relocs
[0].pc_rel
= 0;
9753 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9754 _("selected processor does not support SETPAN instruction"));
9756 inst
.instruction
|= ((inst
.operands
[0].imm
& 1) << 9);
9762 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_pan
),
9763 _("selected processor does not support SETPAN instruction"));
9765 inst
.instruction
|= (inst
.operands
[0].imm
<< 3);
9768 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9769 SMLAxy{cond} Rd,Rm,Rs,Rn
9770 SMLAWy{cond} Rd,Rm,Rs,Rn
9771 Error if any register is R15. */
9776 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9777 inst
.instruction
|= inst
.operands
[1].reg
;
9778 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9779 inst
.instruction
|= inst
.operands
[3].reg
<< 12;
9782 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9783 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9784 Error if any register is R15.
9785 Warning if Rdlo == Rdhi. */
9790 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9791 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9792 inst
.instruction
|= inst
.operands
[2].reg
;
9793 inst
.instruction
|= inst
.operands
[3].reg
<< 8;
9795 if (inst
.operands
[0].reg
== inst
.operands
[1].reg
)
9796 as_tsktsk (_("rdhi and rdlo must be different"));
9799 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9800 SMULxy{cond} Rd,Rm,Rs
9801 Error if any register is R15. */
9806 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
9807 inst
.instruction
|= inst
.operands
[1].reg
;
9808 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
9811 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9812 the same for both ARM and Thumb-2. */
9819 if (inst
.operands
[0].present
)
9821 reg
= inst
.operands
[0].reg
;
9822 constraint (reg
!= REG_SP
, _("SRS base register must be r13"));
9827 inst
.instruction
|= reg
<< 16;
9828 inst
.instruction
|= inst
.operands
[1].imm
;
9829 if (inst
.operands
[0].writeback
|| inst
.operands
[1].writeback
)
9830 inst
.instruction
|= WRITE_BACK
;
9833 /* ARM V6 strex (argument parse). */
9838 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9839 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9840 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9841 || inst
.operands
[2].negative
9842 /* See comment in do_ldrex(). */
9843 || (inst
.operands
[2].reg
== REG_PC
),
9846 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9847 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9849 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
9850 || inst
.relocs
[0].exp
.X_add_number
!= 0,
9851 _("offset must be zero in ARM encoding"));
9853 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9854 inst
.instruction
|= inst
.operands
[1].reg
;
9855 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
9856 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
9862 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
9863 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
9864 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
9865 || inst
.operands
[2].negative
,
9868 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9869 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9877 constraint (inst
.operands
[1].reg
% 2 != 0,
9878 _("even register required"));
9879 constraint (inst
.operands
[2].present
9880 && inst
.operands
[2].reg
!= inst
.operands
[1].reg
+ 1,
9881 _("can only store two consecutive registers"));
9882 /* If op 2 were present and equal to PC, this function wouldn't
9883 have been called in the first place. */
9884 constraint (inst
.operands
[1].reg
== REG_LR
, _("r14 not allowed here"));
9886 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9887 || inst
.operands
[0].reg
== inst
.operands
[1].reg
+ 1
9888 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
9891 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9892 inst
.instruction
|= inst
.operands
[1].reg
;
9893 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
9900 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9901 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9909 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
9910 || inst
.operands
[0].reg
== inst
.operands
[2].reg
, BAD_OVERLAP
);
9915 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9916 extends it to 32-bits, and adds the result to a value in another
9917 register. You can specify a rotation by 0, 8, 16, or 24 bits
9918 before extracting the 16-bit value.
9919 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9920 Condition defaults to COND_ALWAYS.
9921 Error if any register uses R15. */
9926 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9927 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9928 inst
.instruction
|= inst
.operands
[2].reg
;
9929 inst
.instruction
|= inst
.operands
[3].imm
<< 10;
9934 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9935 Condition defaults to COND_ALWAYS.
9936 Error if any register uses R15. */
9941 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9942 inst
.instruction
|= inst
.operands
[1].reg
;
9943 inst
.instruction
|= inst
.operands
[2].imm
<< 10;
9946 /* VFP instructions. In a logical order: SP variant first, monad
9947 before dyad, arithmetic then move then load/store. */
9950 do_vfp_sp_monadic (void)
9952 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9953 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9957 do_vfp_sp_dyadic (void)
9959 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9960 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9961 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
9965 do_vfp_sp_compare_z (void)
9967 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9971 do_vfp_dp_sp_cvt (void)
9973 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
9974 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sm
);
9978 do_vfp_sp_dp_cvt (void)
9980 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
9981 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
9985 do_vfp_reg_from_sp (void)
9987 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9988 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sn
);
9992 do_vfp_reg2_from_sp2 (void)
9994 constraint (inst
.operands
[2].imm
!= 2,
9995 _("only two consecutive VFP SP registers allowed here"));
9996 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
9997 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
9998 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Sm
);
10002 do_vfp_sp_from_reg (void)
10004 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sn
);
10005 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10009 do_vfp_sp2_from_reg2 (void)
10011 constraint (inst
.operands
[0].imm
!= 2,
10012 _("only two consecutive VFP SP registers allowed here"));
10013 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sm
);
10014 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10015 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10019 do_vfp_sp_ldst (void)
10021 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10022 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10026 do_vfp_dp_ldst (void)
10028 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10029 encode_arm_cp_address (1, FALSE
, TRUE
, 0);
10034 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type
)
10036 if (inst
.operands
[0].writeback
)
10037 inst
.instruction
|= WRITE_BACK
;
10039 constraint (ldstm_type
!= VFP_LDSTMIA
,
10040 _("this addressing mode requires base-register writeback"));
10041 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10042 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Sd
);
10043 inst
.instruction
|= inst
.operands
[1].imm
;
10047 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type
)
10051 if (inst
.operands
[0].writeback
)
10052 inst
.instruction
|= WRITE_BACK
;
10054 constraint (ldstm_type
!= VFP_LDSTMIA
&& ldstm_type
!= VFP_LDSTMIAX
,
10055 _("this addressing mode requires base-register writeback"));
10057 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10058 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10060 count
= inst
.operands
[1].imm
<< 1;
10061 if (ldstm_type
== VFP_LDSTMIAX
|| ldstm_type
== VFP_LDSTMDBX
)
10064 inst
.instruction
|= count
;
10068 do_vfp_sp_ldstmia (void)
10070 vfp_sp_ldstm (VFP_LDSTMIA
);
10074 do_vfp_sp_ldstmdb (void)
10076 vfp_sp_ldstm (VFP_LDSTMDB
);
10080 do_vfp_dp_ldstmia (void)
10082 vfp_dp_ldstm (VFP_LDSTMIA
);
10086 do_vfp_dp_ldstmdb (void)
10088 vfp_dp_ldstm (VFP_LDSTMDB
);
10092 do_vfp_xp_ldstmia (void)
10094 vfp_dp_ldstm (VFP_LDSTMIAX
);
10098 do_vfp_xp_ldstmdb (void)
10100 vfp_dp_ldstm (VFP_LDSTMDBX
);
10104 do_vfp_dp_rd_rm (void)
10106 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10107 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dm
);
10111 do_vfp_dp_rn_rd (void)
10113 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dn
);
10114 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10118 do_vfp_dp_rd_rn (void)
10120 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10121 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10125 do_vfp_dp_rd_rn_rm (void)
10127 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10128 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dn
);
10129 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dm
);
10133 do_vfp_dp_rd (void)
10135 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10139 do_vfp_dp_rm_rd_rn (void)
10141 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dm
);
10142 encode_arm_vfp_reg (inst
.operands
[1].reg
, VFP_REG_Dd
);
10143 encode_arm_vfp_reg (inst
.operands
[2].reg
, VFP_REG_Dn
);
10146 /* VFPv3 instructions. */
10148 do_vfp_sp_const (void)
10150 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10151 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10152 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10156 do_vfp_dp_const (void)
10158 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10159 inst
.instruction
|= (inst
.operands
[1].imm
& 0xf0) << 12;
10160 inst
.instruction
|= (inst
.operands
[1].imm
& 0x0f);
10164 vfp_conv (int srcsize
)
10166 int immbits
= srcsize
- inst
.operands
[1].imm
;
10168 if (srcsize
== 16 && !(immbits
>= 0 && immbits
<= srcsize
))
10170 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
10171 i.e. immbits must be in range 0 - 16. */
10172 inst
.error
= _("immediate value out of range, expected range [0, 16]");
10175 else if (srcsize
== 32 && !(immbits
>= 0 && immbits
< srcsize
))
10177 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
10178 i.e. immbits must be in range 0 - 31. */
10179 inst
.error
= _("immediate value out of range, expected range [1, 32]");
10183 inst
.instruction
|= (immbits
& 1) << 5;
10184 inst
.instruction
|= (immbits
>> 1);
10188 do_vfp_sp_conv_16 (void)
10190 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10195 do_vfp_dp_conv_16 (void)
10197 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10202 do_vfp_sp_conv_32 (void)
10204 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
10209 do_vfp_dp_conv_32 (void)
10211 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Dd
);
10215 /* FPA instructions. Also in a logical order. */
10220 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10221 inst
.instruction
|= inst
.operands
[1].reg
;
10225 do_fpa_ldmstm (void)
10227 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10228 switch (inst
.operands
[1].imm
)
10230 case 1: inst
.instruction
|= CP_T_X
; break;
10231 case 2: inst
.instruction
|= CP_T_Y
; break;
10232 case 3: inst
.instruction
|= CP_T_Y
| CP_T_X
; break;
10237 if (inst
.instruction
& (PRE_INDEX
| INDEX_UP
))
10239 /* The instruction specified "ea" or "fd", so we can only accept
10240 [Rn]{!}. The instruction does not really support stacking or
10241 unstacking, so we have to emulate these by setting appropriate
10242 bits and offsets. */
10243 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
10244 || inst
.relocs
[0].exp
.X_add_number
!= 0,
10245 _("this instruction does not support indexing"));
10247 if ((inst
.instruction
& PRE_INDEX
) || inst
.operands
[2].writeback
)
10248 inst
.relocs
[0].exp
.X_add_number
= 12 * inst
.operands
[1].imm
;
10250 if (!(inst
.instruction
& INDEX_UP
))
10251 inst
.relocs
[0].exp
.X_add_number
= -inst
.relocs
[0].exp
.X_add_number
;
10253 if (!(inst
.instruction
& PRE_INDEX
) && inst
.operands
[2].writeback
)
10255 inst
.operands
[2].preind
= 0;
10256 inst
.operands
[2].postind
= 1;
10260 encode_arm_cp_address (2, TRUE
, TRUE
, 0);
10263 /* iWMMXt instructions: strictly in alphabetical order. */
10266 do_iwmmxt_tandorc (void)
10268 constraint (inst
.operands
[0].reg
!= REG_PC
, _("only r15 allowed here"));
10272 do_iwmmxt_textrc (void)
10274 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10275 inst
.instruction
|= inst
.operands
[1].imm
;
10279 do_iwmmxt_textrm (void)
10281 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10282 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10283 inst
.instruction
|= inst
.operands
[2].imm
;
10287 do_iwmmxt_tinsr (void)
10289 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10290 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10291 inst
.instruction
|= inst
.operands
[2].imm
;
10295 do_iwmmxt_tmia (void)
10297 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10298 inst
.instruction
|= inst
.operands
[1].reg
;
10299 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10303 do_iwmmxt_waligni (void)
10305 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10306 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10307 inst
.instruction
|= inst
.operands
[2].reg
;
10308 inst
.instruction
|= inst
.operands
[3].imm
<< 20;
10312 do_iwmmxt_wmerge (void)
10314 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10315 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10316 inst
.instruction
|= inst
.operands
[2].reg
;
10317 inst
.instruction
|= inst
.operands
[3].imm
<< 21;
10321 do_iwmmxt_wmov (void)
10323 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10324 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10325 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10326 inst
.instruction
|= inst
.operands
[1].reg
;
10330 do_iwmmxt_wldstbh (void)
10333 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10335 reloc
= BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
;
10337 reloc
= BFD_RELOC_ARM_CP_OFF_IMM_S2
;
10338 encode_arm_cp_address (1, TRUE
, FALSE
, reloc
);
10342 do_iwmmxt_wldstw (void)
10344 /* RIWR_RIWC clears .isreg for a control register. */
10345 if (!inst
.operands
[0].isreg
)
10347 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
10348 inst
.instruction
|= 0xf0000000;
10351 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10352 encode_arm_cp_address (1, TRUE
, TRUE
, 0);
10356 do_iwmmxt_wldstd (void)
10358 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10359 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
)
10360 && inst
.operands
[1].immisreg
)
10362 inst
.instruction
&= ~0x1a000ff;
10363 inst
.instruction
|= (0xfU
<< 28);
10364 if (inst
.operands
[1].preind
)
10365 inst
.instruction
|= PRE_INDEX
;
10366 if (!inst
.operands
[1].negative
)
10367 inst
.instruction
|= INDEX_UP
;
10368 if (inst
.operands
[1].writeback
)
10369 inst
.instruction
|= WRITE_BACK
;
10370 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10371 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10372 inst
.instruction
|= inst
.operands
[1].imm
;
10375 encode_arm_cp_address (1, TRUE
, FALSE
, 0);
10379 do_iwmmxt_wshufh (void)
10381 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10382 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10383 inst
.instruction
|= ((inst
.operands
[2].imm
& 0xf0) << 16);
10384 inst
.instruction
|= (inst
.operands
[2].imm
& 0x0f);
10388 do_iwmmxt_wzero (void)
10390 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10391 inst
.instruction
|= inst
.operands
[0].reg
;
10392 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10393 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10397 do_iwmmxt_wrwrwr_or_imm5 (void)
10399 if (inst
.operands
[2].isreg
)
10402 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
),
10403 _("immediate operand requires iWMMXt2"));
10405 if (inst
.operands
[2].imm
== 0)
10407 switch ((inst
.instruction
>> 20) & 0xf)
10413 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10414 inst
.operands
[2].imm
= 16;
10415 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0x7 << 20);
10421 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10422 inst
.operands
[2].imm
= 32;
10423 inst
.instruction
= (inst
.instruction
& 0xff0fffff) | (0xb << 20);
10430 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10432 wrn
= (inst
.instruction
>> 16) & 0xf;
10433 inst
.instruction
&= 0xff0fff0f;
10434 inst
.instruction
|= wrn
;
10435 /* Bail out here; the instruction is now assembled. */
10440 /* Map 32 -> 0, etc. */
10441 inst
.operands
[2].imm
&= 0x1f;
10442 inst
.instruction
|= (0xfU
<< 28) | ((inst
.operands
[2].imm
& 0x10) << 4) | (inst
.operands
[2].imm
& 0xf);
10446 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10447 operations first, then control, shift, and load/store. */
10449 /* Insns like "foo X,Y,Z". */
10452 do_mav_triple (void)
10454 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
10455 inst
.instruction
|= inst
.operands
[1].reg
;
10456 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10459 /* Insns like "foo W,X,Y,Z".
10460 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10465 inst
.instruction
|= inst
.operands
[0].reg
<< 5;
10466 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10467 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10468 inst
.instruction
|= inst
.operands
[3].reg
;
10471 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10473 do_mav_dspsc (void)
10475 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10478 /* Maverick shift immediate instructions.
10479 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10480 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10483 do_mav_shift (void)
10485 int imm
= inst
.operands
[2].imm
;
10487 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10488 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10490 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10491 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10492 Bit 4 should be 0. */
10493 imm
= (imm
& 0xf) | ((imm
& 0x70) << 1);
10495 inst
.instruction
|= imm
;
10498 /* XScale instructions. Also sorted arithmetic before move. */
10500 /* Xscale multiply-accumulate (argument parse)
10503 MIAxycc acc0,Rm,Rs. */
10508 inst
.instruction
|= inst
.operands
[1].reg
;
10509 inst
.instruction
|= inst
.operands
[2].reg
<< 12;
10512 /* Xscale move-accumulator-register (argument parse)
10514 MARcc acc0,RdLo,RdHi. */
10519 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
10520 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
10523 /* Xscale move-register-accumulator (argument parse)
10525 MRAcc RdLo,RdHi,acc0. */
10530 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
, BAD_OVERLAP
);
10531 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
10532 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
10535 /* Encoding functions relevant only to Thumb. */
10537 /* inst.operands[i] is a shifted-register operand; encode
10538 it into inst.instruction in the format used by Thumb32. */
10541 encode_thumb32_shifted_operand (int i
)
10543 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10544 unsigned int shift
= inst
.operands
[i
].shift_kind
;
10546 constraint (inst
.operands
[i
].immisreg
,
10547 _("shift by register not allowed in thumb mode"));
10548 inst
.instruction
|= inst
.operands
[i
].reg
;
10549 if (shift
== SHIFT_RRX
)
10550 inst
.instruction
|= SHIFT_ROR
<< 4;
10553 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10554 _("expression too complex"));
10556 constraint (value
> 32
10557 || (value
== 32 && (shift
== SHIFT_LSL
10558 || shift
== SHIFT_ROR
)),
10559 _("shift expression is too large"));
10563 else if (value
== 32)
10566 inst
.instruction
|= shift
<< 4;
10567 inst
.instruction
|= (value
& 0x1c) << 10;
10568 inst
.instruction
|= (value
& 0x03) << 6;
10573 /* inst.operands[i] was set up by parse_address. Encode it into a
10574 Thumb32 format load or store instruction. Reject forms that cannot
10575 be used with such instructions. If is_t is true, reject forms that
10576 cannot be used with a T instruction; if is_d is true, reject forms
10577 that cannot be used with a D instruction. If it is a store insn,
10578 reject PC in Rn. */
10581 encode_thumb32_addr_mode (int i
, bfd_boolean is_t
, bfd_boolean is_d
)
10583 const bfd_boolean is_pc
= (inst
.operands
[i
].reg
== REG_PC
);
10585 constraint (!inst
.operands
[i
].isreg
,
10586 _("Instruction does not support =N addresses"));
10588 inst
.instruction
|= inst
.operands
[i
].reg
<< 16;
10589 if (inst
.operands
[i
].immisreg
)
10591 constraint (is_pc
, BAD_PC_ADDRESSING
);
10592 constraint (is_t
|| is_d
, _("cannot use register index with this instruction"));
10593 constraint (inst
.operands
[i
].negative
,
10594 _("Thumb does not support negative register indexing"));
10595 constraint (inst
.operands
[i
].postind
,
10596 _("Thumb does not support register post-indexing"));
10597 constraint (inst
.operands
[i
].writeback
,
10598 _("Thumb does not support register indexing with writeback"));
10599 constraint (inst
.operands
[i
].shifted
&& inst
.operands
[i
].shift_kind
!= SHIFT_LSL
,
10600 _("Thumb supports only LSL in shifted register indexing"));
10602 inst
.instruction
|= inst
.operands
[i
].imm
;
10603 if (inst
.operands
[i
].shifted
)
10605 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10606 _("expression too complex"));
10607 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10608 || inst
.relocs
[0].exp
.X_add_number
> 3,
10609 _("shift out of range"));
10610 inst
.instruction
|= inst
.relocs
[0].exp
.X_add_number
<< 4;
10612 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10614 else if (inst
.operands
[i
].preind
)
10616 constraint (is_pc
&& inst
.operands
[i
].writeback
, BAD_PC_WRITEBACK
);
10617 constraint (is_t
&& inst
.operands
[i
].writeback
,
10618 _("cannot use writeback with this instruction"));
10619 constraint (is_pc
&& ((inst
.instruction
& THUMB2_LOAD_BIT
) == 0),
10620 BAD_PC_ADDRESSING
);
10624 inst
.instruction
|= 0x01000000;
10625 if (inst
.operands
[i
].writeback
)
10626 inst
.instruction
|= 0x00200000;
10630 inst
.instruction
|= 0x00000c00;
10631 if (inst
.operands
[i
].writeback
)
10632 inst
.instruction
|= 0x00000100;
10634 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10636 else if (inst
.operands
[i
].postind
)
10638 gas_assert (inst
.operands
[i
].writeback
);
10639 constraint (is_pc
, _("cannot use post-indexing with PC-relative addressing"));
10640 constraint (is_t
, _("cannot use post-indexing with this instruction"));
10643 inst
.instruction
|= 0x00200000;
10645 inst
.instruction
|= 0x00000900;
10646 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
10648 else /* unindexed - only for coprocessor */
10649 inst
.error
= _("instruction does not accept unindexed addressing");
/* Table of Thumb instructions which exist in both 16- and 32-bit
   encodings (the latter only in post-V6T2 cores).  The index is the
   value used in the insns table below.  When there is more than one
   possible 16-bit encoding for the instruction, this table always
   holds variant (1).
   Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),			\
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */ \
  X(_push,  b400, e92d0000), /* stmdb sp!,... */ \
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_sev,   bf40, f3af8004),			\
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
#undef X
10766 /* Thumb instruction encoders, in alphabetical order. */
10768 /* ADDW or SUBW. */
10771 do_t_add_sub_w (void)
10775 Rd
= inst
.operands
[0].reg
;
10776 Rn
= inst
.operands
[1].reg
;
10778 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10779 is the SP-{plus,minus}-immediate form of the instruction. */
10781 constraint (Rd
== REG_PC
, BAD_PC
);
10783 reject_bad_reg (Rd
);
10785 inst
.instruction
|= (Rn
<< 16) | (Rd
<< 8);
10786 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10789 /* Parse an add or subtract instruction. We get here with inst.instruction
10790 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10793 do_t_add_sub (void)
10797 Rd
= inst
.operands
[0].reg
;
10798 Rs
= (inst
.operands
[1].present
10799 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
10800 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
10803 set_it_insn_type_last ();
10805 if (unified_syntax
)
10808 bfd_boolean narrow
;
10811 flags
= (inst
.instruction
== T_MNEM_adds
10812 || inst
.instruction
== T_MNEM_subs
);
10814 narrow
= !in_it_block ();
10816 narrow
= in_it_block ();
10817 if (!inst
.operands
[2].isreg
)
10821 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10822 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10824 add
= (inst
.instruction
== T_MNEM_add
10825 || inst
.instruction
== T_MNEM_adds
);
10827 if (inst
.size_req
!= 4)
10829 /* Attempt to use a narrow opcode, with relaxation if
10831 if (Rd
== REG_SP
&& Rs
== REG_SP
&& !flags
)
10832 opcode
= add
? T_MNEM_inc_sp
: T_MNEM_dec_sp
;
10833 else if (Rd
<= 7 && Rs
== REG_SP
&& add
&& !flags
)
10834 opcode
= T_MNEM_add_sp
;
10835 else if (Rd
<= 7 && Rs
== REG_PC
&& add
&& !flags
)
10836 opcode
= T_MNEM_add_pc
;
10837 else if (Rd
<= 7 && Rs
<= 7 && narrow
)
10840 opcode
= add
? T_MNEM_addis
: T_MNEM_subis
;
10842 opcode
= add
? T_MNEM_addi
: T_MNEM_subi
;
10846 inst
.instruction
= THUMB_OP16(opcode
);
10847 inst
.instruction
|= (Rd
<< 4) | Rs
;
10848 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10849 || (inst
.relocs
[0].type
10850 > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
))
10852 if (inst
.size_req
== 2)
10853 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10855 inst
.relax
= opcode
;
10859 constraint (inst
.size_req
== 2, BAD_HIREG
);
10861 if (inst
.size_req
== 4
10862 || (inst
.size_req
!= 2 && !opcode
))
10864 constraint ((inst
.relocs
[0].type
10865 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
10866 && (inst
.relocs
[0].type
10867 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
10868 THUMB1_RELOC_ONLY
);
10871 constraint (add
, BAD_PC
);
10872 constraint (Rs
!= REG_LR
|| inst
.instruction
!= T_MNEM_subs
,
10873 _("only SUBS PC, LR, #const allowed"));
10874 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
10875 _("expression too complex"));
10876 constraint (inst
.relocs
[0].exp
.X_add_number
< 0
10877 || inst
.relocs
[0].exp
.X_add_number
> 0xff,
10878 _("immediate value out of range"));
10879 inst
.instruction
= T2_SUBS_PC_LR
10880 | inst
.relocs
[0].exp
.X_add_number
;
10881 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
10884 else if (Rs
== REG_PC
)
10886 /* Always use addw/subw. */
10887 inst
.instruction
= add
? 0xf20f0000 : 0xf2af0000;
10888 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMM12
;
10892 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10893 inst
.instruction
= (inst
.instruction
& 0xe1ffffff)
10896 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
10898 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_IMM
;
10900 inst
.instruction
|= Rd
<< 8;
10901 inst
.instruction
|= Rs
<< 16;
10906 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
10907 unsigned int shift
= inst
.operands
[2].shift_kind
;
10909 Rn
= inst
.operands
[2].reg
;
10910 /* See if we can do this with a 16-bit instruction. */
10911 if (!inst
.operands
[2].shifted
&& inst
.size_req
!= 4)
10913 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10918 inst
.instruction
= ((inst
.instruction
== T_MNEM_adds
10919 || inst
.instruction
== T_MNEM_add
)
10921 : T_OPCODE_SUB_R3
);
10922 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
10926 if (inst
.instruction
== T_MNEM_add
&& (Rd
== Rs
|| Rd
== Rn
))
10928 /* Thumb-1 cores (except v6-M) require at least one high
10929 register in a narrow non flag setting add. */
10930 if (Rd
> 7 || Rn
> 7
10931 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
)
10932 || ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_msr
))
10939 inst
.instruction
= T_OPCODE_ADD_HI
;
10940 inst
.instruction
|= (Rd
& 8) << 4;
10941 inst
.instruction
|= (Rd
& 7);
10942 inst
.instruction
|= Rn
<< 3;
10948 constraint (Rd
== REG_PC
, BAD_PC
);
10949 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
10950 constraint (Rd
== REG_SP
&& Rs
!= REG_SP
, BAD_SP
);
10951 constraint (Rs
== REG_PC
, BAD_PC
);
10952 reject_bad_reg (Rn
);
10954 /* If we get here, it can't be done in 16 bits. */
10955 constraint (inst
.operands
[2].shifted
&& inst
.operands
[2].immisreg
,
10956 _("shift must be constant"));
10957 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
10958 inst
.instruction
|= Rd
<< 8;
10959 inst
.instruction
|= Rs
<< 16;
10960 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& value
> 3,
10961 _("shift value over 3 not allowed in thumb mode"));
10962 constraint (Rd
== REG_SP
&& Rs
== REG_SP
&& shift
!= SHIFT_LSL
,
10963 _("only LSL shift allowed in thumb mode"));
10964 encode_thumb32_shifted_operand (2);
10969 constraint (inst
.instruction
== T_MNEM_adds
10970 || inst
.instruction
== T_MNEM_subs
,
10973 if (!inst
.operands
[2].isreg
) /* Rd, Rs, #imm */
10975 constraint ((Rd
> 7 && (Rd
!= REG_SP
|| Rs
!= REG_SP
))
10976 || (Rs
> 7 && Rs
!= REG_SP
&& Rs
!= REG_PC
),
10979 inst
.instruction
= (inst
.instruction
== T_MNEM_add
10980 ? 0x0000 : 0x8000);
10981 inst
.instruction
|= (Rd
<< 4) | Rs
;
10982 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
10986 Rn
= inst
.operands
[2].reg
;
10987 constraint (inst
.operands
[2].shifted
, _("unshifted register required"));
10989 /* We now have Rd, Rs, and Rn set to registers. */
10990 if (Rd
> 7 || Rs
> 7 || Rn
> 7)
10992 /* Can't do this for SUB. */
10993 constraint (inst
.instruction
== T_MNEM_sub
, BAD_HIREG
);
10994 inst
.instruction
= T_OPCODE_ADD_HI
;
10995 inst
.instruction
|= (Rd
& 8) << 4;
10996 inst
.instruction
|= (Rd
& 7);
10998 inst
.instruction
|= Rn
<< 3;
11000 inst
.instruction
|= Rs
<< 3;
11002 constraint (1, _("dest must overlap one source register"));
11006 inst
.instruction
= (inst
.instruction
== T_MNEM_add
11007 ? T_OPCODE_ADD_R3
: T_OPCODE_SUB_R3
);
11008 inst
.instruction
|= Rd
| (Rs
<< 3) | (Rn
<< 6);
11018 Rd
= inst
.operands
[0].reg
;
11019 reject_bad_reg (Rd
);
11021 if (unified_syntax
&& inst
.size_req
== 0 && Rd
<= 7)
11023 /* Defer to section relaxation. */
11024 inst
.relax
= inst
.instruction
;
11025 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11026 inst
.instruction
|= Rd
<< 4;
11028 else if (unified_syntax
&& inst
.size_req
!= 2)
11030 /* Generate a 32-bit opcode. */
11031 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11032 inst
.instruction
|= Rd
<< 8;
11033 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_ADD_PC12
;
11034 inst
.relocs
[0].pc_rel
= 1;
11038 /* Generate a 16-bit opcode. */
11039 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11040 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_ADD
;
11041 inst
.relocs
[0].exp
.X_add_number
-= 4; /* PC relative adjust. */
11042 inst
.relocs
[0].pc_rel
= 1;
11043 inst
.instruction
|= Rd
<< 4;
11046 if (inst
.relocs
[0].exp
.X_op
== O_symbol
11047 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11048 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11049 && THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11050 inst
.relocs
[0].exp
.X_add_number
+= 1;
11053 /* Arithmetic instructions for which there is just one 16-bit
11054 instruction encoding, and it allows only two low registers.
11055 For maximal compatibility with ARM syntax, we allow three register
11056 operands even when Thumb-32 instructions are not available, as long
11057 as the first two are identical. For instance, both "sbc r0,r1" and
11058 "sbc r0,r0,r1" are allowed. */
11064 Rd
= inst
.operands
[0].reg
;
11065 Rs
= (inst
.operands
[1].present
11066 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11067 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11068 Rn
= inst
.operands
[2].reg
;
11070 reject_bad_reg (Rd
);
11071 reject_bad_reg (Rs
);
11072 if (inst
.operands
[2].isreg
)
11073 reject_bad_reg (Rn
);
11075 if (unified_syntax
)
11077 if (!inst
.operands
[2].isreg
)
11079 /* For an immediate, we always generate a 32-bit opcode;
11080 section relaxation will shrink it later if possible. */
11081 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11082 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11083 inst
.instruction
|= Rd
<< 8;
11084 inst
.instruction
|= Rs
<< 16;
11085 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11089 bfd_boolean narrow
;
11091 /* See if we can do this with a 16-bit instruction. */
11092 if (THUMB_SETS_FLAGS (inst
.instruction
))
11093 narrow
= !in_it_block ();
11095 narrow
= in_it_block ();
11097 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11099 if (inst
.operands
[2].shifted
)
11101 if (inst
.size_req
== 4)
11107 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11108 inst
.instruction
|= Rd
;
11109 inst
.instruction
|= Rn
<< 3;
11113 /* If we get here, it can't be done in 16 bits. */
11114 constraint (inst
.operands
[2].shifted
11115 && inst
.operands
[2].immisreg
,
11116 _("shift must be constant"));
11117 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11118 inst
.instruction
|= Rd
<< 8;
11119 inst
.instruction
|= Rs
<< 16;
11120 encode_thumb32_shifted_operand (2);
11125 /* On its face this is a lie - the instruction does set the
11126 flags. However, the only supported mnemonic in this mode
11127 says it doesn't. */
11128 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11130 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11131 _("unshifted register required"));
11132 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11133 constraint (Rd
!= Rs
,
11134 _("dest and source1 must be the same register"));
11136 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11137 inst
.instruction
|= Rd
;
11138 inst
.instruction
|= Rn
<< 3;
11142 /* Similarly, but for instructions where the arithmetic operation is
11143 commutative, so we can allow either of them to be different from
11144 the destination operand in a 16-bit instruction. For instance, all
11145 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11152 Rd
= inst
.operands
[0].reg
;
11153 Rs
= (inst
.operands
[1].present
11154 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
11155 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
11156 Rn
= inst
.operands
[2].reg
;
11158 reject_bad_reg (Rd
);
11159 reject_bad_reg (Rs
);
11160 if (inst
.operands
[2].isreg
)
11161 reject_bad_reg (Rn
);
11163 if (unified_syntax
)
11165 if (!inst
.operands
[2].isreg
)
11167 /* For an immediate, we always generate a 32-bit opcode;
11168 section relaxation will shrink it later if possible. */
11169 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11170 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
11171 inst
.instruction
|= Rd
<< 8;
11172 inst
.instruction
|= Rs
<< 16;
11173 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
11177 bfd_boolean narrow
;
11179 /* See if we can do this with a 16-bit instruction. */
11180 if (THUMB_SETS_FLAGS (inst
.instruction
))
11181 narrow
= !in_it_block ();
11183 narrow
= in_it_block ();
11185 if (Rd
> 7 || Rn
> 7 || Rs
> 7)
11187 if (inst
.operands
[2].shifted
)
11189 if (inst
.size_req
== 4)
11196 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11197 inst
.instruction
|= Rd
;
11198 inst
.instruction
|= Rn
<< 3;
11203 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11204 inst
.instruction
|= Rd
;
11205 inst
.instruction
|= Rs
<< 3;
11210 /* If we get here, it can't be done in 16 bits. */
11211 constraint (inst
.operands
[2].shifted
11212 && inst
.operands
[2].immisreg
,
11213 _("shift must be constant"));
11214 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11215 inst
.instruction
|= Rd
<< 8;
11216 inst
.instruction
|= Rs
<< 16;
11217 encode_thumb32_shifted_operand (2);
11222 /* On its face this is a lie - the instruction does set the
11223 flags. However, the only supported mnemonic in this mode
11224 says it doesn't. */
11225 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
11227 constraint (!inst
.operands
[2].isreg
|| inst
.operands
[2].shifted
,
11228 _("unshifted register required"));
11229 constraint (Rd
> 7 || Rs
> 7 || Rn
> 7, BAD_HIREG
);
11231 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11232 inst
.instruction
|= Rd
;
11235 inst
.instruction
|= Rn
<< 3;
11237 inst
.instruction
|= Rs
<< 3;
11239 constraint (1, _("dest must overlap one source register"));
11247 unsigned int msb
= inst
.operands
[1].imm
+ inst
.operands
[2].imm
;
11248 constraint (msb
> 32, _("bit-field extends past end of register"));
11249 /* The instruction encoding stores the LSB and MSB,
11250 not the LSB and width. */
11251 Rd
= inst
.operands
[0].reg
;
11252 reject_bad_reg (Rd
);
11253 inst
.instruction
|= Rd
<< 8;
11254 inst
.instruction
|= (inst
.operands
[1].imm
& 0x1c) << 10;
11255 inst
.instruction
|= (inst
.operands
[1].imm
& 0x03) << 6;
11256 inst
.instruction
|= msb
- 1;
11265 Rd
= inst
.operands
[0].reg
;
11266 reject_bad_reg (Rd
);
11268 /* #0 in second position is alternative syntax for bfc, which is
11269 the same instruction but with REG_PC in the Rm field. */
11270 if (!inst
.operands
[1].isreg
)
11274 Rn
= inst
.operands
[1].reg
;
11275 reject_bad_reg (Rn
);
11278 msb
= inst
.operands
[2].imm
+ inst
.operands
[3].imm
;
11279 constraint (msb
> 32, _("bit-field extends past end of register"));
11280 /* The instruction encoding stores the LSB and MSB,
11281 not the LSB and width. */
11282 inst
.instruction
|= Rd
<< 8;
11283 inst
.instruction
|= Rn
<< 16;
11284 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11285 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11286 inst
.instruction
|= msb
- 1;
11294 Rd
= inst
.operands
[0].reg
;
11295 Rn
= inst
.operands
[1].reg
;
11297 reject_bad_reg (Rd
);
11298 reject_bad_reg (Rn
);
11300 constraint (inst
.operands
[2].imm
+ inst
.operands
[3].imm
> 32,
11301 _("bit-field extends past end of register"));
11302 inst
.instruction
|= Rd
<< 8;
11303 inst
.instruction
|= Rn
<< 16;
11304 inst
.instruction
|= (inst
.operands
[2].imm
& 0x1c) << 10;
11305 inst
.instruction
|= (inst
.operands
[2].imm
& 0x03) << 6;
11306 inst
.instruction
|= inst
.operands
[3].imm
- 1;
11309 /* ARM V5 Thumb BLX (argument parse)
11310 BLX <target_addr> which is BLX(1)
11311 BLX <Rm> which is BLX(2)
11312 Unfortunately, there are two different opcodes for this mnemonic.
11313 So, the insns[].value is not used, and the code here zaps values
11314 into inst.instruction.
11316 ??? How to take advantage of the additional two bits of displacement
11317 available in Thumb32 mode? Need new relocation? */
11322 set_it_insn_type_last ();
11324 if (inst
.operands
[0].isreg
)
11326 constraint (inst
.operands
[0].reg
== REG_PC
, BAD_PC
);
11327 /* We have a register, so this is BLX(2). */
11328 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11332 /* No register. This must be BLX(1). */
11333 inst
.instruction
= 0xf000e800;
11334 encode_branch (BFD_RELOC_THUMB_PCREL_BLX
);
11343 bfd_reloc_code_real_type reloc
;
11346 set_it_insn_type (IF_INSIDE_IT_LAST_INSN
);
11348 if (in_it_block ())
11350 /* Conditional branches inside IT blocks are encoded as unconditional
11352 cond
= COND_ALWAYS
;
11357 if (cond
!= COND_ALWAYS
)
11358 opcode
= T_MNEM_bcond
;
11360 opcode
= inst
.instruction
;
11363 && (inst
.size_req
== 4
11364 || (inst
.size_req
!= 2
11365 && (inst
.operands
[0].hasreloc
11366 || inst
.relocs
[0].exp
.X_op
== O_constant
))))
11368 inst
.instruction
= THUMB_OP32(opcode
);
11369 if (cond
== COND_ALWAYS
)
11370 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
11373 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
),
11374 _("selected architecture does not support "
11375 "wide conditional branch instruction"));
11377 gas_assert (cond
!= 0xF);
11378 inst
.instruction
|= cond
<< 22;
11379 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
11384 inst
.instruction
= THUMB_OP16(opcode
);
11385 if (cond
== COND_ALWAYS
)
11386 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
11389 inst
.instruction
|= cond
<< 8;
11390 reloc
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
11392 /* Allow section relaxation. */
11393 if (unified_syntax
&& inst
.size_req
!= 2)
11394 inst
.relax
= opcode
;
11396 inst
.relocs
[0].type
= reloc
;
11397 inst
.relocs
[0].pc_rel
= 1;
11400 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11401 between the two is the maximum immediate allowed - which is passed in
11404 do_t_bkpt_hlt1 (int range
)
11406 constraint (inst
.cond
!= COND_ALWAYS
,
11407 _("instruction is always unconditional"));
11408 if (inst
.operands
[0].present
)
11410 constraint (inst
.operands
[0].imm
> range
,
11411 _("immediate value out of range"));
11412 inst
.instruction
|= inst
.operands
[0].imm
;
11415 set_it_insn_type (NEUTRAL_IT_INSN
);
11421 do_t_bkpt_hlt1 (63);
11427 do_t_bkpt_hlt1 (255);
11431 do_t_branch23 (void)
11433 set_it_insn_type_last ();
11434 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23
);
11436 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11437 this file. We used to simply ignore the PLT reloc type here --
11438 the branch encoding is now needed to deal with TLSCALL relocs.
11439 So if we see a PLT reloc now, put it back to how it used to be to
11440 keep the preexisting behaviour. */
11441 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_PLT32
)
11442 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
11444 #if defined(OBJ_COFF)
11445 /* If the destination of the branch is a defined symbol which does not have
11446 the THUMB_FUNC attribute, then we must be calling a function which has
11447 the (interfacearm) attribute. We look for the Thumb entry point to that
11448 function and change the branch to refer to that function instead. */
11449 if ( inst
.relocs
[0].exp
.X_op
== O_symbol
11450 && inst
.relocs
[0].exp
.X_add_symbol
!= NULL
11451 && S_IS_DEFINED (inst
.relocs
[0].exp
.X_add_symbol
)
11452 && ! THUMB_IS_FUNC (inst
.relocs
[0].exp
.X_add_symbol
))
11453 inst
.relocs
[0].exp
.X_add_symbol
11454 = find_real_start (inst
.relocs
[0].exp
.X_add_symbol
);
11461 set_it_insn_type_last ();
11462 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11463 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11464 should cause the alignment to be checked once it is known. This is
11465 because BX PC only works if the instruction is word aligned. */
11473 set_it_insn_type_last ();
11474 Rm
= inst
.operands
[0].reg
;
11475 reject_bad_reg (Rm
);
11476 inst
.instruction
|= Rm
<< 16;
11485 Rd
= inst
.operands
[0].reg
;
11486 Rm
= inst
.operands
[1].reg
;
11488 reject_bad_reg (Rd
);
11489 reject_bad_reg (Rm
);
11491 inst
.instruction
|= Rd
<< 8;
11492 inst
.instruction
|= Rm
<< 16;
11493 inst
.instruction
|= Rm
;
11499 set_it_insn_type (OUTSIDE_IT_INSN
);
11505 set_it_insn_type (OUTSIDE_IT_INSN
);
11506 inst
.instruction
|= inst
.operands
[0].imm
;
11512 set_it_insn_type (OUTSIDE_IT_INSN
);
11514 && (inst
.operands
[1].present
|| inst
.size_req
== 4)
11515 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6_notm
))
11517 unsigned int imod
= (inst
.instruction
& 0x0030) >> 4;
11518 inst
.instruction
= 0xf3af8000;
11519 inst
.instruction
|= imod
<< 9;
11520 inst
.instruction
|= inst
.operands
[0].imm
<< 5;
11521 if (inst
.operands
[1].present
)
11522 inst
.instruction
|= 0x100 | inst
.operands
[1].imm
;
11526 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
)
11527 && (inst
.operands
[0].imm
& 4),
11528 _("selected processor does not support 'A' form "
11529 "of this instruction"));
11530 constraint (inst
.operands
[1].present
|| inst
.size_req
== 4,
11531 _("Thumb does not support the 2-argument "
11532 "form of this instruction"));
11533 inst
.instruction
|= inst
.operands
[0].imm
;
11537 /* THUMB CPY instruction (argument parse). */
11542 if (inst
.size_req
== 4)
11544 inst
.instruction
= THUMB_OP32 (T_MNEM_mov
);
11545 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11546 inst
.instruction
|= inst
.operands
[1].reg
;
11550 inst
.instruction
|= (inst
.operands
[0].reg
& 0x8) << 4;
11551 inst
.instruction
|= (inst
.operands
[0].reg
& 0x7);
11552 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11559 set_it_insn_type (OUTSIDE_IT_INSN
);
11560 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11561 inst
.instruction
|= inst
.operands
[0].reg
;
11562 inst
.relocs
[0].pc_rel
= 1;
11563 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH7
;
11569 inst
.instruction
|= inst
.operands
[0].imm
;
11575 unsigned Rd
, Rn
, Rm
;
11577 Rd
= inst
.operands
[0].reg
;
11578 Rn
= (inst
.operands
[1].present
11579 ? inst
.operands
[1].reg
: Rd
);
11580 Rm
= inst
.operands
[2].reg
;
11582 reject_bad_reg (Rd
);
11583 reject_bad_reg (Rn
);
11584 reject_bad_reg (Rm
);
11586 inst
.instruction
|= Rd
<< 8;
11587 inst
.instruction
|= Rn
<< 16;
11588 inst
.instruction
|= Rm
;
11594 if (unified_syntax
&& inst
.size_req
== 4)
11595 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11597 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11603 unsigned int cond
= inst
.operands
[0].imm
;
11605 set_it_insn_type (IT_INSN
);
11606 now_it
.mask
= (inst
.instruction
& 0xf) | 0x10;
11608 now_it
.warn_deprecated
= FALSE
;
11610 /* If the condition is a negative condition, invert the mask. */
11611 if ((cond
& 0x1) == 0x0)
11613 unsigned int mask
= inst
.instruction
& 0x000f;
11615 if ((mask
& 0x7) == 0)
11617 /* No conversion needed. */
11618 now_it
.block_length
= 1;
11620 else if ((mask
& 0x3) == 0)
11623 now_it
.block_length
= 2;
11625 else if ((mask
& 0x1) == 0)
11628 now_it
.block_length
= 3;
11633 now_it
.block_length
= 4;
11636 inst
.instruction
&= 0xfff0;
11637 inst
.instruction
|= mask
;
11640 inst
.instruction
|= cond
<< 4;
11643 /* Helper function used for both push/pop and ldm/stm. */
11645 encode_thumb2_multi (bfd_boolean do_io
, int base
, unsigned mask
,
11646 bfd_boolean writeback
)
11648 bfd_boolean load
, store
;
11650 gas_assert (base
!= -1 || !do_io
);
11651 load
= do_io
&& ((inst
.instruction
& (1 << 20)) != 0);
11652 store
= do_io
&& !load
;
11654 if (mask
& (1 << 13))
11655 inst
.error
= _("SP not allowed in register list");
11657 if (do_io
&& (mask
& (1 << base
)) != 0
11659 inst
.error
= _("having the base register in the register list when "
11660 "using write back is UNPREDICTABLE");
11664 if (mask
& (1 << 15))
11666 if (mask
& (1 << 14))
11667 inst
.error
= _("LR and PC should not both be in register list");
11669 set_it_insn_type_last ();
11674 if (mask
& (1 << 15))
11675 inst
.error
= _("PC not allowed in register list");
11678 if (do_io
&& ((mask
& (mask
- 1)) == 0))
11680 /* Single register transfers implemented as str/ldr. */
11683 if (inst
.instruction
& (1 << 23))
11684 inst
.instruction
= 0x00000b04; /* ia! -> [base], #4 */
11686 inst
.instruction
= 0x00000d04; /* db! -> [base, #-4]! */
11690 if (inst
.instruction
& (1 << 23))
11691 inst
.instruction
= 0x00800000; /* ia -> [base] */
11693 inst
.instruction
= 0x00000c04; /* db -> [base, #-4] */
11696 inst
.instruction
|= 0xf8400000;
11698 inst
.instruction
|= 0x00100000;
11700 mask
= ffs (mask
) - 1;
11703 else if (writeback
)
11704 inst
.instruction
|= WRITE_BACK
;
11706 inst
.instruction
|= mask
;
11708 inst
.instruction
|= base
<< 16;
11714 /* This really doesn't seem worth it. */
11715 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
11716 _("expression too complex"));
11717 constraint (inst
.operands
[1].writeback
,
11718 _("Thumb load/store multiple does not support {reglist}^"));
11720 if (unified_syntax
)
11722 bfd_boolean narrow
;
11726 /* See if we can use a 16-bit instruction. */
11727 if (inst
.instruction
< 0xffff /* not ldmdb/stmdb */
11728 && inst
.size_req
!= 4
11729 && !(inst
.operands
[1].imm
& ~0xff))
11731 mask
= 1 << inst
.operands
[0].reg
;
11733 if (inst
.operands
[0].reg
<= 7)
11735 if (inst
.instruction
== T_MNEM_stmia
11736 ? inst
.operands
[0].writeback
11737 : (inst
.operands
[0].writeback
11738 == !(inst
.operands
[1].imm
& mask
)))
11740 if (inst
.instruction
== T_MNEM_stmia
11741 && (inst
.operands
[1].imm
& mask
)
11742 && (inst
.operands
[1].imm
& (mask
- 1)))
11743 as_warn (_("value stored for r%d is UNKNOWN"),
11744 inst
.operands
[0].reg
);
11746 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11747 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11748 inst
.instruction
|= inst
.operands
[1].imm
;
11751 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11753 /* This means 1 register in reg list one of 3 situations:
11754 1. Instruction is stmia, but without writeback.
11755 2. lmdia without writeback, but with Rn not in
11757 3. ldmia with writeback, but with Rn in reglist.
11758 Case 3 is UNPREDICTABLE behaviour, so we handle
11759 case 1 and 2 which can be converted into a 16-bit
11760 str or ldr. The SP cases are handled below. */
11761 unsigned long opcode
;
11762 /* First, record an error for Case 3. */
11763 if (inst
.operands
[1].imm
& mask
11764 && inst
.operands
[0].writeback
)
11766 _("having the base register in the register list when "
11767 "using write back is UNPREDICTABLE");
11769 opcode
= (inst
.instruction
== T_MNEM_stmia
? T_MNEM_str
11771 inst
.instruction
= THUMB_OP16 (opcode
);
11772 inst
.instruction
|= inst
.operands
[0].reg
<< 3;
11773 inst
.instruction
|= (ffs (inst
.operands
[1].imm
)-1);
11777 else if (inst
.operands
[0] .reg
== REG_SP
)
11779 if (inst
.operands
[0].writeback
)
11782 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11783 ? T_MNEM_push
: T_MNEM_pop
);
11784 inst
.instruction
|= inst
.operands
[1].imm
;
11787 else if ((inst
.operands
[1].imm
& (inst
.operands
[1].imm
-1)) == 0)
11790 THUMB_OP16 (inst
.instruction
== T_MNEM_stmia
11791 ? T_MNEM_str_sp
: T_MNEM_ldr_sp
);
11792 inst
.instruction
|= ((ffs (inst
.operands
[1].imm
)-1) << 8);
11800 if (inst
.instruction
< 0xffff)
11801 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
11803 encode_thumb2_multi (TRUE
/* do_io */, inst
.operands
[0].reg
,
11804 inst
.operands
[1].imm
,
11805 inst
.operands
[0].writeback
);
11810 constraint (inst
.operands
[0].reg
> 7
11811 || (inst
.operands
[1].imm
& ~0xff), BAD_HIREG
);
11812 constraint (inst
.instruction
!= T_MNEM_ldmia
11813 && inst
.instruction
!= T_MNEM_stmia
,
11814 _("Thumb-2 instruction only valid in unified syntax"));
11815 if (inst
.instruction
== T_MNEM_stmia
)
11817 if (!inst
.operands
[0].writeback
)
11818 as_warn (_("this instruction will write back the base register"));
11819 if ((inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
))
11820 && (inst
.operands
[1].imm
& ((1 << inst
.operands
[0].reg
) - 1)))
11821 as_warn (_("value stored for r%d is UNKNOWN"),
11822 inst
.operands
[0].reg
);
11826 if (!inst
.operands
[0].writeback
11827 && !(inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11828 as_warn (_("this instruction will write back the base register"));
11829 else if (inst
.operands
[0].writeback
11830 && (inst
.operands
[1].imm
& (1 << inst
.operands
[0].reg
)))
11831 as_warn (_("this instruction will not write back the base register"));
11834 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11835 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
11836 inst
.instruction
|= inst
.operands
[1].imm
;
11843 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].preind
11844 || inst
.operands
[1].postind
|| inst
.operands
[1].writeback
11845 || inst
.operands
[1].immisreg
|| inst
.operands
[1].shifted
11846 || inst
.operands
[1].negative
,
11849 constraint ((inst
.operands
[1].reg
== REG_PC
), BAD_PC
);
11851 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11852 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
11853 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
11859 if (!inst
.operands
[1].present
)
11861 constraint (inst
.operands
[0].reg
== REG_LR
,
11862 _("r14 not allowed as first register "
11863 "when second register is omitted"));
11864 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
11866 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11869 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11870 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
11871 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
11877 unsigned long opcode
;
11880 if (inst
.operands
[0].isreg
11881 && !inst
.operands
[0].preind
11882 && inst
.operands
[0].reg
== REG_PC
)
11883 set_it_insn_type_last ();
11885 opcode
= inst
.instruction
;
11886 if (unified_syntax
)
11888 if (!inst
.operands
[1].isreg
)
11890 if (opcode
<= 0xffff)
11891 inst
.instruction
= THUMB_OP32 (opcode
);
11892 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
11895 if (inst
.operands
[1].isreg
11896 && !inst
.operands
[1].writeback
11897 && !inst
.operands
[1].shifted
&& !inst
.operands
[1].postind
11898 && !inst
.operands
[1].negative
&& inst
.operands
[0].reg
<= 7
11899 && opcode
<= 0xffff
11900 && inst
.size_req
!= 4)
11902 /* Insn may have a 16-bit form. */
11903 Rn
= inst
.operands
[1].reg
;
11904 if (inst
.operands
[1].immisreg
)
11906 inst
.instruction
= THUMB_OP16 (opcode
);
11908 if (Rn
<= 7 && inst
.operands
[1].imm
<= 7)
11910 else if (opcode
!= T_MNEM_ldr
&& opcode
!= T_MNEM_str
)
11911 reject_bad_reg (inst
.operands
[1].imm
);
11913 else if ((Rn
<= 7 && opcode
!= T_MNEM_ldrsh
11914 && opcode
!= T_MNEM_ldrsb
)
11915 || ((Rn
== REG_PC
|| Rn
== REG_SP
) && opcode
== T_MNEM_ldr
)
11916 || (Rn
== REG_SP
&& opcode
== T_MNEM_str
))
11923 if (inst
.relocs
[0].pc_rel
)
11924 opcode
= T_MNEM_ldr_pc2
;
11926 opcode
= T_MNEM_ldr_pc
;
11930 if (opcode
== T_MNEM_ldr
)
11931 opcode
= T_MNEM_ldr_sp
;
11933 opcode
= T_MNEM_str_sp
;
11935 inst
.instruction
= inst
.operands
[0].reg
<< 8;
11939 inst
.instruction
= inst
.operands
[0].reg
;
11940 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
11942 inst
.instruction
|= THUMB_OP16 (opcode
);
11943 if (inst
.size_req
== 2)
11944 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
11946 inst
.relax
= opcode
;
11950 /* Definitely a 32-bit variant. */
11952 /* Warning for Erratum 752419. */
11953 if (opcode
== T_MNEM_ldr
11954 && inst
.operands
[0].reg
== REG_SP
11955 && inst
.operands
[1].writeback
== 1
11956 && !inst
.operands
[1].immisreg
)
11958 if (no_cpu_selected ()
11959 || (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7
)
11960 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
)
11961 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7r
)))
11962 as_warn (_("This instruction may be unpredictable "
11963 "if executed on M-profile cores "
11964 "with interrupts enabled."));
11967 /* Do some validations regarding addressing modes. */
11968 if (inst
.operands
[1].immisreg
)
11969 reject_bad_reg (inst
.operands
[1].imm
);
11971 constraint (inst
.operands
[1].writeback
== 1
11972 && inst
.operands
[0].reg
== inst
.operands
[1].reg
,
11975 inst
.instruction
= THUMB_OP32 (opcode
);
11976 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
11977 encode_thumb32_addr_mode (1, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
11978 check_ldr_r15_aligned ();
11982 constraint (inst
.operands
[0].reg
> 7, BAD_HIREG
);
11984 if (inst
.instruction
== T_MNEM_ldrsh
|| inst
.instruction
== T_MNEM_ldrsb
)
11986 /* Only [Rn,Rm] is acceptable. */
11987 constraint (inst
.operands
[1].reg
> 7 || inst
.operands
[1].imm
> 7, BAD_HIREG
);
11988 constraint (!inst
.operands
[1].isreg
|| !inst
.operands
[1].immisreg
11989 || inst
.operands
[1].postind
|| inst
.operands
[1].shifted
11990 || inst
.operands
[1].negative
,
11991 _("Thumb does not support this addressing mode"));
11992 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11996 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
11997 if (!inst
.operands
[1].isreg
)
11998 if (move_or_literal_pool (0, CONST_THUMB
, /*mode_3=*/FALSE
))
12001 constraint (!inst
.operands
[1].preind
12002 || inst
.operands
[1].shifted
12003 || inst
.operands
[1].writeback
,
12004 _("Thumb does not support this addressing mode"));
12005 if (inst
.operands
[1].reg
== REG_PC
|| inst
.operands
[1].reg
== REG_SP
)
12007 constraint (inst
.instruction
& 0x0600,
12008 _("byte or halfword not valid for base register"));
12009 constraint (inst
.operands
[1].reg
== REG_PC
12010 && !(inst
.instruction
& THUMB_LOAD_BIT
),
12011 _("r15 based store not allowed"));
12012 constraint (inst
.operands
[1].immisreg
,
12013 _("invalid base register for register offset"));
12015 if (inst
.operands
[1].reg
== REG_PC
)
12016 inst
.instruction
= T_OPCODE_LDR_PC
;
12017 else if (inst
.instruction
& THUMB_LOAD_BIT
)
12018 inst
.instruction
= T_OPCODE_LDR_SP
;
12020 inst
.instruction
= T_OPCODE_STR_SP
;
12022 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12023 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12027 constraint (inst
.operands
[1].reg
> 7, BAD_HIREG
);
12028 if (!inst
.operands
[1].immisreg
)
12030 /* Immediate offset. */
12031 inst
.instruction
|= inst
.operands
[0].reg
;
12032 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12033 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_OFFSET
;
12037 /* Register offset. */
12038 constraint (inst
.operands
[1].imm
> 7, BAD_HIREG
);
12039 constraint (inst
.operands
[1].negative
,
12040 _("Thumb does not support this addressing mode"));
12043 switch (inst
.instruction
)
12045 case T_OPCODE_STR_IW
: inst
.instruction
= T_OPCODE_STR_RW
; break;
12046 case T_OPCODE_STR_IH
: inst
.instruction
= T_OPCODE_STR_RH
; break;
12047 case T_OPCODE_STR_IB
: inst
.instruction
= T_OPCODE_STR_RB
; break;
12048 case T_OPCODE_LDR_IW
: inst
.instruction
= T_OPCODE_LDR_RW
; break;
12049 case T_OPCODE_LDR_IH
: inst
.instruction
= T_OPCODE_LDR_RH
; break;
12050 case T_OPCODE_LDR_IB
: inst
.instruction
= T_OPCODE_LDR_RB
; break;
12051 case 0x5600 /* ldrsb */:
12052 case 0x5e00 /* ldrsh */: break;
12056 inst
.instruction
|= inst
.operands
[0].reg
;
12057 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12058 inst
.instruction
|= inst
.operands
[1].imm
<< 6;
12064 if (!inst
.operands
[1].present
)
12066 inst
.operands
[1].reg
= inst
.operands
[0].reg
+ 1;
12067 constraint (inst
.operands
[0].reg
== REG_LR
,
12068 _("r14 not allowed here"));
12069 constraint (inst
.operands
[0].reg
== REG_R12
,
12070 _("r12 not allowed here"));
12073 if (inst
.operands
[2].writeback
12074 && (inst
.operands
[0].reg
== inst
.operands
[2].reg
12075 || inst
.operands
[1].reg
== inst
.operands
[2].reg
))
12076 as_warn (_("base register written back, and overlaps "
12077 "one of transfer registers"));
12079 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12080 inst
.instruction
|= inst
.operands
[1].reg
<< 8;
12081 encode_thumb32_addr_mode (2, /*is_t=*/FALSE
, /*is_d=*/TRUE
);
12087 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
12088 encode_thumb32_addr_mode (1, /*is_t=*/TRUE
, /*is_d=*/FALSE
);
12094 unsigned Rd
, Rn
, Rm
, Ra
;
12096 Rd
= inst
.operands
[0].reg
;
12097 Rn
= inst
.operands
[1].reg
;
12098 Rm
= inst
.operands
[2].reg
;
12099 Ra
= inst
.operands
[3].reg
;
12101 reject_bad_reg (Rd
);
12102 reject_bad_reg (Rn
);
12103 reject_bad_reg (Rm
);
12104 reject_bad_reg (Ra
);
12106 inst
.instruction
|= Rd
<< 8;
12107 inst
.instruction
|= Rn
<< 16;
12108 inst
.instruction
|= Rm
;
12109 inst
.instruction
|= Ra
<< 12;
12115 unsigned RdLo
, RdHi
, Rn
, Rm
;
12117 RdLo
= inst
.operands
[0].reg
;
12118 RdHi
= inst
.operands
[1].reg
;
12119 Rn
= inst
.operands
[2].reg
;
12120 Rm
= inst
.operands
[3].reg
;
12122 reject_bad_reg (RdLo
);
12123 reject_bad_reg (RdHi
);
12124 reject_bad_reg (Rn
);
12125 reject_bad_reg (Rm
);
12127 inst
.instruction
|= RdLo
<< 12;
12128 inst
.instruction
|= RdHi
<< 8;
12129 inst
.instruction
|= Rn
<< 16;
12130 inst
.instruction
|= Rm
;
12134 do_t_mov_cmp (void)
12138 Rn
= inst
.operands
[0].reg
;
12139 Rm
= inst
.operands
[1].reg
;
12142 set_it_insn_type_last ();
12144 if (unified_syntax
)
12146 int r0off
= (inst
.instruction
== T_MNEM_mov
12147 || inst
.instruction
== T_MNEM_movs
) ? 8 : 16;
12148 unsigned long opcode
;
12149 bfd_boolean narrow
;
12150 bfd_boolean low_regs
;
12152 low_regs
= (Rn
<= 7 && Rm
<= 7);
12153 opcode
= inst
.instruction
;
12154 if (in_it_block ())
12155 narrow
= opcode
!= T_MNEM_movs
;
12157 narrow
= opcode
!= T_MNEM_movs
|| low_regs
;
12158 if (inst
.size_req
== 4
12159 || inst
.operands
[1].shifted
)
12162 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
12163 if (opcode
== T_MNEM_movs
&& inst
.operands
[1].isreg
12164 && !inst
.operands
[1].shifted
12168 inst
.instruction
= T2_SUBS_PC_LR
;
12172 if (opcode
== T_MNEM_cmp
)
12174 constraint (Rn
== REG_PC
, BAD_PC
);
12177 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
12179 warn_deprecated_sp (Rm
);
12180 /* R15 was documented as a valid choice for Rm in ARMv6,
12181 but as UNPREDICTABLE in ARMv7. ARM's proprietary
12182 tools reject R15, so we do too. */
12183 constraint (Rm
== REG_PC
, BAD_PC
);
12186 reject_bad_reg (Rm
);
12188 else if (opcode
== T_MNEM_mov
12189 || opcode
== T_MNEM_movs
)
12191 if (inst
.operands
[1].isreg
)
12193 if (opcode
== T_MNEM_movs
)
12195 reject_bad_reg (Rn
);
12196 reject_bad_reg (Rm
);
12200 /* This is mov.n. */
12201 if ((Rn
== REG_SP
|| Rn
== REG_PC
)
12202 && (Rm
== REG_SP
|| Rm
== REG_PC
))
12204 as_tsktsk (_("Use of r%u as a source register is "
12205 "deprecated when r%u is the destination "
12206 "register."), Rm
, Rn
);
12211 /* This is mov.w. */
12212 constraint (Rn
== REG_PC
, BAD_PC
);
12213 constraint (Rm
== REG_PC
, BAD_PC
);
12214 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
12215 constraint (Rn
== REG_SP
&& Rm
== REG_SP
, BAD_SP
);
12219 reject_bad_reg (Rn
);
12222 if (!inst
.operands
[1].isreg
)
12224 /* Immediate operand. */
12225 if (!in_it_block () && opcode
== T_MNEM_mov
)
12227 if (low_regs
&& narrow
)
12229 inst
.instruction
= THUMB_OP16 (opcode
);
12230 inst
.instruction
|= Rn
<< 8;
12231 if (inst
.relocs
[0].type
< BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12232 || inst
.relocs
[0].type
> BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
)
12234 if (inst
.size_req
== 2)
12235 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12237 inst
.relax
= opcode
;
12242 constraint ((inst
.relocs
[0].type
12243 >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
)
12244 && (inst
.relocs
[0].type
12245 <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
) ,
12246 THUMB1_RELOC_ONLY
);
12248 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12249 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12250 inst
.instruction
|= Rn
<< r0off
;
12251 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12254 else if (inst
.operands
[1].shifted
&& inst
.operands
[1].immisreg
12255 && (inst
.instruction
== T_MNEM_mov
12256 || inst
.instruction
== T_MNEM_movs
))
12258 /* Register shifts are encoded as separate shift instructions. */
12259 bfd_boolean flags
= (inst
.instruction
== T_MNEM_movs
);
12261 if (in_it_block ())
12266 if (inst
.size_req
== 4)
12269 if (!low_regs
|| inst
.operands
[1].imm
> 7)
12275 switch (inst
.operands
[1].shift_kind
)
12278 opcode
= narrow
? T_OPCODE_LSL_R
: THUMB_OP32 (T_MNEM_lsl
);
12281 opcode
= narrow
? T_OPCODE_ASR_R
: THUMB_OP32 (T_MNEM_asr
);
12284 opcode
= narrow
? T_OPCODE_LSR_R
: THUMB_OP32 (T_MNEM_lsr
);
12287 opcode
= narrow
? T_OPCODE_ROR_R
: THUMB_OP32 (T_MNEM_ror
);
12293 inst
.instruction
= opcode
;
12296 inst
.instruction
|= Rn
;
12297 inst
.instruction
|= inst
.operands
[1].imm
<< 3;
12302 inst
.instruction
|= CONDS_BIT
;
12304 inst
.instruction
|= Rn
<< 8;
12305 inst
.instruction
|= Rm
<< 16;
12306 inst
.instruction
|= inst
.operands
[1].imm
;
12311 /* Some mov with immediate shift have narrow variants.
12312 Register shifts are handled above. */
12313 if (low_regs
&& inst
.operands
[1].shifted
12314 && (inst
.instruction
== T_MNEM_mov
12315 || inst
.instruction
== T_MNEM_movs
))
12317 if (in_it_block ())
12318 narrow
= (inst
.instruction
== T_MNEM_mov
);
12320 narrow
= (inst
.instruction
== T_MNEM_movs
);
12325 switch (inst
.operands
[1].shift_kind
)
12327 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
12328 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
12329 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
12330 default: narrow
= FALSE
; break;
12336 inst
.instruction
|= Rn
;
12337 inst
.instruction
|= Rm
<< 3;
12338 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
12342 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12343 inst
.instruction
|= Rn
<< r0off
;
12344 encode_thumb32_shifted_operand (1);
12348 switch (inst
.instruction
)
12351 /* In v4t or v5t a move of two lowregs produces unpredictable
12352 results. Don't allow this. */
12355 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6
),
12356 "MOV Rd, Rs with two low registers is not "
12357 "permitted on this architecture");
12358 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
12362 inst
.instruction
= T_OPCODE_MOV_HR
;
12363 inst
.instruction
|= (Rn
& 0x8) << 4;
12364 inst
.instruction
|= (Rn
& 0x7);
12365 inst
.instruction
|= Rm
<< 3;
12369 /* We know we have low registers at this point.
12370 Generate LSLS Rd, Rs, #0. */
12371 inst
.instruction
= T_OPCODE_LSL_I
;
12372 inst
.instruction
|= Rn
;
12373 inst
.instruction
|= Rm
<< 3;
12379 inst
.instruction
= T_OPCODE_CMP_LR
;
12380 inst
.instruction
|= Rn
;
12381 inst
.instruction
|= Rm
<< 3;
12385 inst
.instruction
= T_OPCODE_CMP_HR
;
12386 inst
.instruction
|= (Rn
& 0x8) << 4;
12387 inst
.instruction
|= (Rn
& 0x7);
12388 inst
.instruction
|= Rm
<< 3;
12395 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12397 /* PR 10443: Do not silently ignore shifted operands. */
12398 constraint (inst
.operands
[1].shifted
,
12399 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12401 if (inst
.operands
[1].isreg
)
12403 if (Rn
< 8 && Rm
< 8)
12405 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12406 since a MOV instruction produces unpredictable results. */
12407 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12408 inst
.instruction
= T_OPCODE_ADD_I3
;
12410 inst
.instruction
= T_OPCODE_CMP_LR
;
12412 inst
.instruction
|= Rn
;
12413 inst
.instruction
|= Rm
<< 3;
12417 if (inst
.instruction
== T_OPCODE_MOV_I8
)
12418 inst
.instruction
= T_OPCODE_MOV_HR
;
12420 inst
.instruction
= T_OPCODE_CMP_HR
;
12426 constraint (Rn
> 7,
12427 _("only lo regs allowed with immediate"));
12428 inst
.instruction
|= Rn
<< 8;
12429 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_IMM
;
12440 top
= (inst
.instruction
& 0x00800000) != 0;
12441 if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVW
)
12443 constraint (top
, _(":lower16: not allowed in this instruction"));
12444 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVW
;
12446 else if (inst
.relocs
[0].type
== BFD_RELOC_ARM_MOVT
)
12448 constraint (!top
, _(":upper16: not allowed in this instruction"));
12449 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_MOVT
;
12452 Rd
= inst
.operands
[0].reg
;
12453 reject_bad_reg (Rd
);
12455 inst
.instruction
|= Rd
<< 8;
12456 if (inst
.relocs
[0].type
== BFD_RELOC_UNUSED
)
12458 imm
= inst
.relocs
[0].exp
.X_add_number
;
12459 inst
.instruction
|= (imm
& 0xf000) << 4;
12460 inst
.instruction
|= (imm
& 0x0800) << 15;
12461 inst
.instruction
|= (imm
& 0x0700) << 4;
12462 inst
.instruction
|= (imm
& 0x00ff);
12467 do_t_mvn_tst (void)
12471 Rn
= inst
.operands
[0].reg
;
12472 Rm
= inst
.operands
[1].reg
;
12474 if (inst
.instruction
== T_MNEM_cmp
12475 || inst
.instruction
== T_MNEM_cmn
)
12476 constraint (Rn
== REG_PC
, BAD_PC
);
12478 reject_bad_reg (Rn
);
12479 reject_bad_reg (Rm
);
12481 if (unified_syntax
)
12483 int r0off
= (inst
.instruction
== T_MNEM_mvn
12484 || inst
.instruction
== T_MNEM_mvns
) ? 8 : 16;
12485 bfd_boolean narrow
;
12487 if (inst
.size_req
== 4
12488 || inst
.instruction
> 0xffff
12489 || inst
.operands
[1].shifted
12490 || Rn
> 7 || Rm
> 7)
12492 else if (inst
.instruction
== T_MNEM_cmn
12493 || inst
.instruction
== T_MNEM_tst
)
12495 else if (THUMB_SETS_FLAGS (inst
.instruction
))
12496 narrow
= !in_it_block ();
12498 narrow
= in_it_block ();
12500 if (!inst
.operands
[1].isreg
)
12502 /* For an immediate, we always generate a 32-bit opcode;
12503 section relaxation will shrink it later if possible. */
12504 if (inst
.instruction
< 0xffff)
12505 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12506 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12507 inst
.instruction
|= Rn
<< r0off
;
12508 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12512 /* See if we can do this with a 16-bit instruction. */
12515 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12516 inst
.instruction
|= Rn
;
12517 inst
.instruction
|= Rm
<< 3;
12521 constraint (inst
.operands
[1].shifted
12522 && inst
.operands
[1].immisreg
,
12523 _("shift must be constant"));
12524 if (inst
.instruction
< 0xffff)
12525 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12526 inst
.instruction
|= Rn
<< r0off
;
12527 encode_thumb32_shifted_operand (1);
12533 constraint (inst
.instruction
> 0xffff
12534 || inst
.instruction
== T_MNEM_mvns
, BAD_THUMB32
);
12535 constraint (!inst
.operands
[1].isreg
|| inst
.operands
[1].shifted
,
12536 _("unshifted register required"));
12537 constraint (Rn
> 7 || Rm
> 7,
12540 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12541 inst
.instruction
|= Rn
;
12542 inst
.instruction
|= Rm
<< 3;
12551 if (do_vfp_nsyn_mrs () == SUCCESS
)
12554 Rd
= inst
.operands
[0].reg
;
12555 reject_bad_reg (Rd
);
12556 inst
.instruction
|= Rd
<< 8;
12558 if (inst
.operands
[1].isreg
)
12560 unsigned br
= inst
.operands
[1].reg
;
12561 if (((br
& 0x200) == 0) && ((br
& 0xf000) != 0xf000))
12562 as_bad (_("bad register for mrs"));
12564 inst
.instruction
|= br
& (0xf << 16);
12565 inst
.instruction
|= (br
& 0x300) >> 4;
12566 inst
.instruction
|= (br
& SPSR_BIT
) >> 2;
12570 int flags
= inst
.operands
[1].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12572 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12574 /* PR gas/12698: The constraint is only applied for m_profile.
12575 If the user has specified -march=all, we want to ignore it as
12576 we are building for any CPU type, including non-m variants. */
12577 bfd_boolean m_profile
=
12578 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12579 constraint ((flags
!= 0) && m_profile
, _("selected processor does "
12580 "not support requested special purpose register"));
12583 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12585 constraint ((flags
& ~SPSR_BIT
) != (PSR_c
|PSR_f
),
12586 _("'APSR', 'CPSR' or 'SPSR' expected"));
12588 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12589 inst
.instruction
|= inst
.operands
[1].imm
& 0xff;
12590 inst
.instruction
|= 0xf0000;
12600 if (do_vfp_nsyn_msr () == SUCCESS
)
12603 constraint (!inst
.operands
[1].isreg
,
12604 _("Thumb encoding does not support an immediate here"));
12606 if (inst
.operands
[0].isreg
)
12607 flags
= (int)(inst
.operands
[0].reg
);
12609 flags
= inst
.operands
[0].imm
;
12611 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_m
))
12613 int bits
= inst
.operands
[0].imm
& (PSR_c
|PSR_x
|PSR_s
|PSR_f
|SPSR_BIT
);
12615 /* PR gas/12698: The constraint is only applied for m_profile.
12616 If the user has specified -march=all, we want to ignore it as
12617 we are building for any CPU type, including non-m variants. */
12618 bfd_boolean m_profile
=
12619 !ARM_FEATURE_CORE_EQUAL (selected_cpu
, arm_arch_any
);
12620 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12621 && (bits
& ~(PSR_s
| PSR_f
)) != 0)
12622 || (!ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6_dsp
)
12623 && bits
!= PSR_f
)) && m_profile
,
12624 _("selected processor does not support requested special "
12625 "purpose register"));
12628 constraint ((flags
& 0xff) != 0, _("selected processor does not support "
12629 "requested special purpose register"));
12631 Rn
= inst
.operands
[1].reg
;
12632 reject_bad_reg (Rn
);
12634 inst
.instruction
|= (flags
& SPSR_BIT
) >> 2;
12635 inst
.instruction
|= (flags
& 0xf0000) >> 8;
12636 inst
.instruction
|= (flags
& 0x300) >> 4;
12637 inst
.instruction
|= (flags
& 0xff);
12638 inst
.instruction
|= Rn
<< 16;
12644 bfd_boolean narrow
;
12645 unsigned Rd
, Rn
, Rm
;
12647 if (!inst
.operands
[2].present
)
12648 inst
.operands
[2].reg
= inst
.operands
[0].reg
;
12650 Rd
= inst
.operands
[0].reg
;
12651 Rn
= inst
.operands
[1].reg
;
12652 Rm
= inst
.operands
[2].reg
;
12654 if (unified_syntax
)
12656 if (inst
.size_req
== 4
12662 else if (inst
.instruction
== T_MNEM_muls
)
12663 narrow
= !in_it_block ();
12665 narrow
= in_it_block ();
12669 constraint (inst
.instruction
== T_MNEM_muls
, BAD_THUMB32
);
12670 constraint (Rn
> 7 || Rm
> 7,
12677 /* 16-bit MULS/Conditional MUL. */
12678 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12679 inst
.instruction
|= Rd
;
12682 inst
.instruction
|= Rm
<< 3;
12684 inst
.instruction
|= Rn
<< 3;
12686 constraint (1, _("dest must overlap one source register"));
12690 constraint (inst
.instruction
!= T_MNEM_mul
,
12691 _("Thumb-2 MUL must not set flags"));
12693 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12694 inst
.instruction
|= Rd
<< 8;
12695 inst
.instruction
|= Rn
<< 16;
12696 inst
.instruction
|= Rm
<< 0;
12698 reject_bad_reg (Rd
);
12699 reject_bad_reg (Rn
);
12700 reject_bad_reg (Rm
);
12707 unsigned RdLo
, RdHi
, Rn
, Rm
;
12709 RdLo
= inst
.operands
[0].reg
;
12710 RdHi
= inst
.operands
[1].reg
;
12711 Rn
= inst
.operands
[2].reg
;
12712 Rm
= inst
.operands
[3].reg
;
12714 reject_bad_reg (RdLo
);
12715 reject_bad_reg (RdHi
);
12716 reject_bad_reg (Rn
);
12717 reject_bad_reg (Rm
);
12719 inst
.instruction
|= RdLo
<< 12;
12720 inst
.instruction
|= RdHi
<< 8;
12721 inst
.instruction
|= Rn
<< 16;
12722 inst
.instruction
|= Rm
;
12725 as_tsktsk (_("rdhi and rdlo must be different"));
12731 set_it_insn_type (NEUTRAL_IT_INSN
);
12733 if (unified_syntax
)
12735 if (inst
.size_req
== 4 || inst
.operands
[0].imm
> 15)
12737 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12738 inst
.instruction
|= inst
.operands
[0].imm
;
12742 /* PR9722: Check for Thumb2 availability before
12743 generating a thumb2 nop instruction. */
12744 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v6t2
))
12746 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12747 inst
.instruction
|= inst
.operands
[0].imm
<< 4;
12750 inst
.instruction
= 0x46c0;
12755 constraint (inst
.operands
[0].present
,
12756 _("Thumb does not support NOP with hints"));
12757 inst
.instruction
= 0x46c0;
12764 if (unified_syntax
)
12766 bfd_boolean narrow
;
12768 if (THUMB_SETS_FLAGS (inst
.instruction
))
12769 narrow
= !in_it_block ();
12771 narrow
= in_it_block ();
12772 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
12774 if (inst
.size_req
== 4)
12779 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12780 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
12781 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
12785 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12786 inst
.instruction
|= inst
.operands
[0].reg
;
12787 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12792 constraint (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7,
12794 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
12796 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12797 inst
.instruction
|= inst
.operands
[0].reg
;
12798 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
12807 Rd
= inst
.operands
[0].reg
;
12808 Rn
= inst
.operands
[1].present
? inst
.operands
[1].reg
: Rd
;
12810 reject_bad_reg (Rd
);
12811 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12812 reject_bad_reg (Rn
);
12814 inst
.instruction
|= Rd
<< 8;
12815 inst
.instruction
|= Rn
<< 16;
12817 if (!inst
.operands
[2].isreg
)
12819 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
12820 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
12826 Rm
= inst
.operands
[2].reg
;
12827 reject_bad_reg (Rm
);
12829 constraint (inst
.operands
[2].shifted
12830 && inst
.operands
[2].immisreg
,
12831 _("shift must be constant"));
12832 encode_thumb32_shifted_operand (2);
12839 unsigned Rd
, Rn
, Rm
;
12841 Rd
= inst
.operands
[0].reg
;
12842 Rn
= inst
.operands
[1].reg
;
12843 Rm
= inst
.operands
[2].reg
;
12845 reject_bad_reg (Rd
);
12846 reject_bad_reg (Rn
);
12847 reject_bad_reg (Rm
);
12849 inst
.instruction
|= Rd
<< 8;
12850 inst
.instruction
|= Rn
<< 16;
12851 inst
.instruction
|= Rm
;
12852 if (inst
.operands
[3].present
)
12854 unsigned int val
= inst
.relocs
[0].exp
.X_add_number
;
12855 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
12856 _("expression too complex"));
12857 inst
.instruction
|= (val
& 0x1c) << 10;
12858 inst
.instruction
|= (val
& 0x03) << 6;
12865 if (!inst
.operands
[3].present
)
12869 inst
.instruction
&= ~0x00000020;
12871 /* PR 10168. Swap the Rm and Rn registers. */
12872 Rtmp
= inst
.operands
[1].reg
;
12873 inst
.operands
[1].reg
= inst
.operands
[2].reg
;
12874 inst
.operands
[2].reg
= Rtmp
;
12882 if (inst
.operands
[0].immisreg
)
12883 reject_bad_reg (inst
.operands
[0].imm
);
12885 encode_thumb32_addr_mode (0, /*is_t=*/FALSE
, /*is_d=*/FALSE
);
12889 do_t_push_pop (void)
12893 constraint (inst
.operands
[0].writeback
,
12894 _("push/pop do not support {reglist}^"));
12895 constraint (inst
.relocs
[0].type
!= BFD_RELOC_UNUSED
,
12896 _("expression too complex"));
12898 mask
= inst
.operands
[0].imm
;
12899 if (inst
.size_req
!= 4 && (mask
& ~0xff) == 0)
12900 inst
.instruction
= THUMB_OP16 (inst
.instruction
) | mask
;
12901 else if (inst
.size_req
!= 4
12902 && (mask
& ~0xff) == (1U << (inst
.instruction
== T_MNEM_push
12903 ? REG_LR
: REG_PC
)))
12905 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12906 inst
.instruction
|= THUMB_PP_PC_LR
;
12907 inst
.instruction
|= mask
& 0xff;
12909 else if (unified_syntax
)
12911 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12912 encode_thumb2_multi (TRUE
/* do_io */, 13, mask
, TRUE
);
12916 inst
.error
= _("invalid register list to push/pop instruction");
12924 if (unified_syntax
)
12925 encode_thumb2_multi (FALSE
/* do_io */, -1, inst
.operands
[0].imm
, FALSE
);
12928 inst
.error
= _("invalid register list to push/pop instruction");
12934 do_t_vscclrm (void)
12936 if (inst
.operands
[0].issingle
)
12938 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1) << 22;
12939 inst
.instruction
|= (inst
.operands
[0].reg
& 0x1e) << 11;
12940 inst
.instruction
|= inst
.operands
[0].imm
;
12944 inst
.instruction
|= (inst
.operands
[0].reg
& 0x10) << 18;
12945 inst
.instruction
|= (inst
.operands
[0].reg
& 0xf) << 12;
12946 inst
.instruction
|= 1 << 8;
12947 inst
.instruction
|= inst
.operands
[0].imm
<< 1;
12956 Rd
= inst
.operands
[0].reg
;
12957 Rm
= inst
.operands
[1].reg
;
12959 reject_bad_reg (Rd
);
12960 reject_bad_reg (Rm
);
12962 inst
.instruction
|= Rd
<< 8;
12963 inst
.instruction
|= Rm
<< 16;
12964 inst
.instruction
|= Rm
;
12972 Rd
= inst
.operands
[0].reg
;
12973 Rm
= inst
.operands
[1].reg
;
12975 reject_bad_reg (Rd
);
12976 reject_bad_reg (Rm
);
12978 if (Rd
<= 7 && Rm
<= 7
12979 && inst
.size_req
!= 4)
12981 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
12982 inst
.instruction
|= Rd
;
12983 inst
.instruction
|= Rm
<< 3;
12985 else if (unified_syntax
)
12987 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
12988 inst
.instruction
|= Rd
<< 8;
12989 inst
.instruction
|= Rm
<< 16;
12990 inst
.instruction
|= Rm
;
12993 inst
.error
= BAD_HIREG
;
13001 Rd
= inst
.operands
[0].reg
;
13002 Rm
= inst
.operands
[1].reg
;
13004 reject_bad_reg (Rd
);
13005 reject_bad_reg (Rm
);
13007 inst
.instruction
|= Rd
<< 8;
13008 inst
.instruction
|= Rm
;
13016 Rd
= inst
.operands
[0].reg
;
13017 Rs
= (inst
.operands
[1].present
13018 ? inst
.operands
[1].reg
/* Rd, Rs, foo */
13019 : inst
.operands
[0].reg
); /* Rd, foo -> Rd, Rd, foo */
13021 reject_bad_reg (Rd
);
13022 reject_bad_reg (Rs
);
13023 if (inst
.operands
[2].isreg
)
13024 reject_bad_reg (inst
.operands
[2].reg
);
13026 inst
.instruction
|= Rd
<< 8;
13027 inst
.instruction
|= Rs
<< 16;
13028 if (!inst
.operands
[2].isreg
)
13030 bfd_boolean narrow
;
13032 if ((inst
.instruction
& 0x00100000) != 0)
13033 narrow
= !in_it_block ();
13035 narrow
= in_it_block ();
13037 if (Rd
> 7 || Rs
> 7)
13040 if (inst
.size_req
== 4 || !unified_syntax
)
13043 if (inst
.relocs
[0].exp
.X_op
!= O_constant
13044 || inst
.relocs
[0].exp
.X_add_number
!= 0)
13047 /* Turn rsb #0 into 16-bit neg. We should probably do this via
13048 relaxation, but it doesn't seem worth the hassle. */
13051 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13052 inst
.instruction
= THUMB_OP16 (T_MNEM_negs
);
13053 inst
.instruction
|= Rs
<< 3;
13054 inst
.instruction
|= Rd
;
13058 inst
.instruction
= (inst
.instruction
& 0xe1ffffff) | 0x10000000;
13059 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
13063 encode_thumb32_shifted_operand (2);
13069 if (warn_on_deprecated
13070 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13071 as_tsktsk (_("setend use is deprecated for ARMv8"));
13073 set_it_insn_type (OUTSIDE_IT_INSN
);
13074 if (inst
.operands
[0].imm
)
13075 inst
.instruction
|= 0x8;
13081 if (!inst
.operands
[1].present
)
13082 inst
.operands
[1].reg
= inst
.operands
[0].reg
;
13084 if (unified_syntax
)
13086 bfd_boolean narrow
;
13089 switch (inst
.instruction
)
13092 case T_MNEM_asrs
: shift_kind
= SHIFT_ASR
; break;
13094 case T_MNEM_lsls
: shift_kind
= SHIFT_LSL
; break;
13096 case T_MNEM_lsrs
: shift_kind
= SHIFT_LSR
; break;
13098 case T_MNEM_rors
: shift_kind
= SHIFT_ROR
; break;
13102 if (THUMB_SETS_FLAGS (inst
.instruction
))
13103 narrow
= !in_it_block ();
13105 narrow
= in_it_block ();
13106 if (inst
.operands
[0].reg
> 7 || inst
.operands
[1].reg
> 7)
13108 if (!inst
.operands
[2].isreg
&& shift_kind
== SHIFT_ROR
)
13110 if (inst
.operands
[2].isreg
13111 && (inst
.operands
[1].reg
!= inst
.operands
[0].reg
13112 || inst
.operands
[2].reg
> 7))
13114 if (inst
.size_req
== 4)
13117 reject_bad_reg (inst
.operands
[0].reg
);
13118 reject_bad_reg (inst
.operands
[1].reg
);
13122 if (inst
.operands
[2].isreg
)
13124 reject_bad_reg (inst
.operands
[2].reg
);
13125 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13126 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13127 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13128 inst
.instruction
|= inst
.operands
[2].reg
;
13130 /* PR 12854: Error on extraneous shifts. */
13131 constraint (inst
.operands
[2].shifted
,
13132 _("extraneous shift as part of operand to shift insn"));
13136 inst
.operands
[1].shifted
= 1;
13137 inst
.operands
[1].shift_kind
= shift_kind
;
13138 inst
.instruction
= THUMB_OP32 (THUMB_SETS_FLAGS (inst
.instruction
)
13139 ? T_MNEM_movs
: T_MNEM_mov
);
13140 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13141 encode_thumb32_shifted_operand (1);
13142 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
13143 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13148 if (inst
.operands
[2].isreg
)
13150 switch (shift_kind
)
13152 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13153 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13154 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13155 case SHIFT_ROR
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13159 inst
.instruction
|= inst
.operands
[0].reg
;
13160 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13162 /* PR 12854: Error on extraneous shifts. */
13163 constraint (inst
.operands
[2].shifted
,
13164 _("extraneous shift as part of operand to shift insn"));
13168 switch (shift_kind
)
13170 case SHIFT_ASR
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13171 case SHIFT_LSL
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13172 case SHIFT_LSR
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13175 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13176 inst
.instruction
|= inst
.operands
[0].reg
;
13177 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13183 constraint (inst
.operands
[0].reg
> 7
13184 || inst
.operands
[1].reg
> 7, BAD_HIREG
);
13185 constraint (THUMB_SETS_FLAGS (inst
.instruction
), BAD_THUMB32
);
13187 if (inst
.operands
[2].isreg
) /* Rd, {Rs,} Rn */
13189 constraint (inst
.operands
[2].reg
> 7, BAD_HIREG
);
13190 constraint (inst
.operands
[0].reg
!= inst
.operands
[1].reg
,
13191 _("source1 and dest must be same register"));
13193 switch (inst
.instruction
)
13195 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_R
; break;
13196 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_R
; break;
13197 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_R
; break;
13198 case T_MNEM_ror
: inst
.instruction
= T_OPCODE_ROR_R
; break;
13202 inst
.instruction
|= inst
.operands
[0].reg
;
13203 inst
.instruction
|= inst
.operands
[2].reg
<< 3;
13205 /* PR 12854: Error on extraneous shifts. */
13206 constraint (inst
.operands
[2].shifted
,
13207 _("extraneous shift as part of operand to shift insn"));
13211 switch (inst
.instruction
)
13213 case T_MNEM_asr
: inst
.instruction
= T_OPCODE_ASR_I
; break;
13214 case T_MNEM_lsl
: inst
.instruction
= T_OPCODE_LSL_I
; break;
13215 case T_MNEM_lsr
: inst
.instruction
= T_OPCODE_LSR_I
; break;
13216 case T_MNEM_ror
: inst
.error
= _("ror #imm not supported"); return;
13219 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_SHIFT
;
13220 inst
.instruction
|= inst
.operands
[0].reg
;
13221 inst
.instruction
|= inst
.operands
[1].reg
<< 3;
13229 unsigned Rd
, Rn
, Rm
;
13231 Rd
= inst
.operands
[0].reg
;
13232 Rn
= inst
.operands
[1].reg
;
13233 Rm
= inst
.operands
[2].reg
;
13235 reject_bad_reg (Rd
);
13236 reject_bad_reg (Rn
);
13237 reject_bad_reg (Rm
);
13239 inst
.instruction
|= Rd
<< 8;
13240 inst
.instruction
|= Rn
<< 16;
13241 inst
.instruction
|= Rm
;
13247 unsigned Rd
, Rn
, Rm
;
13249 Rd
= inst
.operands
[0].reg
;
13250 Rm
= inst
.operands
[1].reg
;
13251 Rn
= inst
.operands
[2].reg
;
13253 reject_bad_reg (Rd
);
13254 reject_bad_reg (Rn
);
13255 reject_bad_reg (Rm
);
13257 inst
.instruction
|= Rd
<< 8;
13258 inst
.instruction
|= Rn
<< 16;
13259 inst
.instruction
|= Rm
;
13265 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13266 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v7a
),
13267 _("SMC is not permitted on this architecture"));
13268 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13269 _("expression too complex"));
13270 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13271 inst
.instruction
|= (value
& 0xf000) >> 12;
13272 inst
.instruction
|= (value
& 0x0ff0);
13273 inst
.instruction
|= (value
& 0x000f) << 16;
13274 /* PR gas/15623: SMC instructions must be last in an IT block. */
13275 set_it_insn_type_last ();
13281 unsigned int value
= inst
.relocs
[0].exp
.X_add_number
;
13283 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13284 inst
.instruction
|= (value
& 0x0fff);
13285 inst
.instruction
|= (value
& 0xf000) << 4;
13289 do_t_ssat_usat (int bias
)
13293 Rd
= inst
.operands
[0].reg
;
13294 Rn
= inst
.operands
[2].reg
;
13296 reject_bad_reg (Rd
);
13297 reject_bad_reg (Rn
);
13299 inst
.instruction
|= Rd
<< 8;
13300 inst
.instruction
|= inst
.operands
[1].imm
- bias
;
13301 inst
.instruction
|= Rn
<< 16;
13303 if (inst
.operands
[3].present
)
13305 offsetT shift_amount
= inst
.relocs
[0].exp
.X_add_number
;
13307 inst
.relocs
[0].type
= BFD_RELOC_UNUSED
;
13309 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
13310 _("expression too complex"));
13312 if (shift_amount
!= 0)
13314 constraint (shift_amount
> 31,
13315 _("shift expression is too large"));
13317 if (inst
.operands
[3].shift_kind
== SHIFT_ASR
)
13318 inst
.instruction
|= 0x00200000; /* sh bit. */
13320 inst
.instruction
|= (shift_amount
& 0x1c) << 10;
13321 inst
.instruction
|= (shift_amount
& 0x03) << 6;
13329 do_t_ssat_usat (1);
13337 Rd
= inst
.operands
[0].reg
;
13338 Rn
= inst
.operands
[2].reg
;
13340 reject_bad_reg (Rd
);
13341 reject_bad_reg (Rn
);
13343 inst
.instruction
|= Rd
<< 8;
13344 inst
.instruction
|= inst
.operands
[1].imm
- 1;
13345 inst
.instruction
|= Rn
<< 16;
13351 constraint (!inst
.operands
[2].isreg
|| !inst
.operands
[2].preind
13352 || inst
.operands
[2].postind
|| inst
.operands
[2].writeback
13353 || inst
.operands
[2].immisreg
|| inst
.operands
[2].shifted
13354 || inst
.operands
[2].negative
,
13357 constraint (inst
.operands
[2].reg
== REG_PC
, BAD_PC
);
13359 inst
.instruction
|= inst
.operands
[0].reg
<< 8;
13360 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13361 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
13362 inst
.relocs
[0].type
= BFD_RELOC_ARM_T32_OFFSET_U8
;
13368 if (!inst
.operands
[2].present
)
13369 inst
.operands
[2].reg
= inst
.operands
[1].reg
+ 1;
13371 constraint (inst
.operands
[0].reg
== inst
.operands
[1].reg
13372 || inst
.operands
[0].reg
== inst
.operands
[2].reg
13373 || inst
.operands
[0].reg
== inst
.operands
[3].reg
,
13376 inst
.instruction
|= inst
.operands
[0].reg
;
13377 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
13378 inst
.instruction
|= inst
.operands
[2].reg
<< 8;
13379 inst
.instruction
|= inst
.operands
[3].reg
<< 16;
13385 unsigned Rd
, Rn
, Rm
;
13387 Rd
= inst
.operands
[0].reg
;
13388 Rn
= inst
.operands
[1].reg
;
13389 Rm
= inst
.operands
[2].reg
;
13391 reject_bad_reg (Rd
);
13392 reject_bad_reg (Rn
);
13393 reject_bad_reg (Rm
);
13395 inst
.instruction
|= Rd
<< 8;
13396 inst
.instruction
|= Rn
<< 16;
13397 inst
.instruction
|= Rm
;
13398 inst
.instruction
|= inst
.operands
[3].imm
<< 4;
13406 Rd
= inst
.operands
[0].reg
;
13407 Rm
= inst
.operands
[1].reg
;
13409 reject_bad_reg (Rd
);
13410 reject_bad_reg (Rm
);
13412 if (inst
.instruction
<= 0xffff
13413 && inst
.size_req
!= 4
13414 && Rd
<= 7 && Rm
<= 7
13415 && (!inst
.operands
[2].present
|| inst
.operands
[2].imm
== 0))
13417 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13418 inst
.instruction
|= Rd
;
13419 inst
.instruction
|= Rm
<< 3;
13421 else if (unified_syntax
)
13423 if (inst
.instruction
<= 0xffff)
13424 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13425 inst
.instruction
|= Rd
<< 8;
13426 inst
.instruction
|= Rm
;
13427 inst
.instruction
|= inst
.operands
[2].imm
<< 4;
13431 constraint (inst
.operands
[2].present
&& inst
.operands
[2].imm
!= 0,
13432 _("Thumb encoding does not support rotation"));
13433 constraint (1, BAD_HIREG
);
/* Fragment of do_t_swi: marks the instruction for an ARM SWI (SVC)
   relocation so the immediate is fixed up later.
   NOTE(review): the function header and several preceding lines
   (possibly the v6-M OS-extension availability check present in some
   binutils versions) are missing from this excerpt -- confirm against
   the full gas/config/tc-arm.c before editing.  */
13440 inst
.relocs
[0].type
= BFD_RELOC_ARM_SWI
;
13449 half
= (inst
.instruction
& 0x10) != 0;
13450 set_it_insn_type_last ();
13451 constraint (inst
.operands
[0].immisreg
,
13452 _("instruction requires register index"));
13454 Rn
= inst
.operands
[0].reg
;
13455 Rm
= inst
.operands
[0].imm
;
13457 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
))
13458 constraint (Rn
== REG_SP
, BAD_SP
);
13459 reject_bad_reg (Rm
);
13461 constraint (!half
&& inst
.operands
[0].shifted
,
13462 _("instruction does not allow shifted index"));
13463 inst
.instruction
|= (Rn
<< 16) | Rm
;
13469 if (!inst
.operands
[0].present
)
13470 inst
.operands
[0].imm
= 0;
13472 if ((unsigned int) inst
.operands
[0].imm
> 255 || inst
.size_req
== 4)
13474 constraint (inst
.size_req
== 2,
13475 _("immediate value out of range"));
13476 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13477 inst
.instruction
|= (inst
.operands
[0].imm
& 0xf000u
) << 4;
13478 inst
.instruction
|= (inst
.operands
[0].imm
& 0x0fffu
) << 0;
13482 inst
.instruction
= THUMB_OP16 (inst
.instruction
);
13483 inst
.instruction
|= inst
.operands
[0].imm
;
13486 set_it_insn_type (NEUTRAL_IT_INSN
);
13493 do_t_ssat_usat (0);
13501 Rd
= inst
.operands
[0].reg
;
13502 Rn
= inst
.operands
[2].reg
;
13504 reject_bad_reg (Rd
);
13505 reject_bad_reg (Rn
);
13507 inst
.instruction
|= Rd
<< 8;
13508 inst
.instruction
|= inst
.operands
[1].imm
;
13509 inst
.instruction
|= Rn
<< 16;
13512 /* Checking the range of the branch offset (VAL) with NBITS bits
13513 and IS_SIGNED signedness. Also checks the LSB to be 0. */
13515 v8_1_branch_value_check (int val
, int nbits
, int is_signed
)
13517 gas_assert (nbits
> 0 && nbits
<= 32);
13520 int cmp
= (1 << (nbits
- 1));
13521 if ((val
< -cmp
) || (val
>= cmp
) || (val
& 0x01))
13526 if ((val
<= 0) || (val
>= (1 << nbits
)) || (val
& 0x1))
13532 /* For branches in Armv8.1-M Mainline. */
13534 do_t_branch_future (void)
13536 unsigned long insn
= inst
.instruction
;
13538 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13539 if (inst
.operands
[0].hasreloc
== 0)
13541 if (v8_1_branch_value_check (inst
.operands
[0].imm
, 5, FALSE
) == FAIL
)
13542 as_bad (BAD_BRANCH_OFF
);
13544 inst
.instruction
|= ((inst
.operands
[0].imm
& 0x1f) >> 1) << 23;
13548 inst
.relocs
[0].type
= BFD_RELOC_THUMB_PCREL_BRANCH5
;
13549 inst
.relocs
[0].pc_rel
= 1;
13555 if (inst
.operands
[1].hasreloc
== 0)
13557 int val
= inst
.operands
[1].imm
;
13558 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 17, TRUE
) == FAIL
)
13559 as_bad (BAD_BRANCH_OFF
);
13561 int immA
= (val
& 0x0001f000) >> 12;
13562 int immB
= (val
& 0x00000ffc) >> 2;
13563 int immC
= (val
& 0x00000002) >> 1;
13564 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13568 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF17
;
13569 inst
.relocs
[1].pc_rel
= 1;
13574 if (inst
.operands
[1].hasreloc
== 0)
13576 int val
= inst
.operands
[1].imm
;
13577 if (v8_1_branch_value_check (inst
.operands
[1].imm
, 19, TRUE
) == FAIL
)
13578 as_bad (BAD_BRANCH_OFF
);
13580 int immA
= (val
& 0x0007f000) >> 12;
13581 int immB
= (val
& 0x00000ffc) >> 2;
13582 int immC
= (val
& 0x00000002) >> 1;
13583 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13587 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF19
;
13588 inst
.relocs
[1].pc_rel
= 1;
13592 case T_MNEM_bfcsel
:
13594 if (inst
.operands
[1].hasreloc
== 0)
13596 int val
= inst
.operands
[1].imm
;
13597 int immA
= (val
& 0x00001000) >> 12;
13598 int immB
= (val
& 0x00000ffc) >> 2;
13599 int immC
= (val
& 0x00000002) >> 1;
13600 inst
.instruction
|= (immA
<< 16) | (immB
<< 1) | (immC
<< 11);
13604 inst
.relocs
[1].type
= BFD_RELOC_ARM_THUMB_BF13
;
13605 inst
.relocs
[1].pc_rel
= 1;
13609 if (inst
.operands
[2].hasreloc
== 0)
13611 constraint ((inst
.operands
[0].hasreloc
!= 0), BAD_ARGS
);
13612 int val2
= inst
.operands
[2].imm
;
13613 int val0
= inst
.operands
[0].imm
& 0x1f;
13614 int diff
= val2
- val0
;
13616 inst
.instruction
|= 1 << 17; /* T bit. */
13617 else if (diff
!= 2)
13618 as_bad (_("out of range label-relative fixup value"));
13622 constraint ((inst
.operands
[0].hasreloc
== 0), BAD_ARGS
);
13623 inst
.relocs
[2].type
= BFD_RELOC_THUMB_PCREL_BFCSEL
;
13624 inst
.relocs
[2].pc_rel
= 1;
13628 constraint (inst
.cond
!= COND_ALWAYS
, BAD_COND
);
13629 inst
.instruction
|= (inst
.operands
[3].imm
& 0xf) << 18;
13634 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
13641 /* Helper function for do_t_loloop to handle relocations. */
13643 v8_1_loop_reloc (int is_le
)
13645 if (inst
.relocs
[0].exp
.X_op
== O_constant
)
13647 int value
= inst
.relocs
[0].exp
.X_add_number
;
13648 value
= (is_le
) ? -value
: value
;
13650 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
13651 as_bad (BAD_BRANCH_OFF
);
13655 immh
= (value
& 0x00000ffc) >> 2;
13656 imml
= (value
& 0x00000002) >> 1;
13658 inst
.instruction
|= (imml
<< 11) | (immh
<< 1);
13662 inst
.relocs
[0].type
= BFD_RELOC_ARM_THUMB_LOOP12
;
13663 inst
.relocs
[0].pc_rel
= 1;
13667 /* To handle the Scalar Low Overhead Loop instructions
13668 in Armv8.1-M Mainline. */
13672 unsigned long insn
= inst
.instruction
;
13674 set_it_insn_type (OUTSIDE_IT_INSN
);
13675 inst
.instruction
= THUMB_OP32 (inst
.instruction
);
13681 if (!inst
.operands
[0].present
)
13682 inst
.instruction
|= 1 << 21;
13684 v8_1_loop_reloc (TRUE
);
13688 v8_1_loop_reloc (FALSE
);
13689 /* Fall through. */
13691 constraint (inst
.operands
[1].isreg
!= 1, BAD_ARGS
);
13692 inst
.instruction
|= (inst
.operands
[1].reg
<< 16);
13699 /* Neon instruction encoder helpers. */
13701 /* Encodings for the different types for various Neon opcodes. */
13703 /* An "invalid" code for the following tables. */
13706 struct neon_tab_entry
13709 unsigned float_or_poly
;
13710 unsigned scalar_or_imm
;
13713 /* Map overloaded Neon opcodes to their respective encodings. */
13714 #define NEON_ENC_TAB \
13715 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13716 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13717 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13718 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13719 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13720 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13721 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13722 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13723 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13724 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13725 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13726 /* Register variants of the following two instructions are encoded as
13727 vcge / vcgt with the operands reversed. */ \
13728 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13729 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13730 X(vfma, N_INV, 0x0000c10, N_INV), \
13731 X(vfms, N_INV, 0x0200c10, N_INV), \
13732 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13733 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13734 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13735 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13736 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13737 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13738 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13739 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13740 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13741 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13742 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13743 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13744 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13745 X(vshl, 0x0000400, N_INV, 0x0800510), \
13746 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13747 X(vand, 0x0000110, N_INV, 0x0800030), \
13748 X(vbic, 0x0100110, N_INV, 0x0800030), \
13749 X(veor, 0x1000110, N_INV, N_INV), \
13750 X(vorn, 0x0300110, N_INV, 0x0800010), \
13751 X(vorr, 0x0200110, N_INV, 0x0800010), \
13752 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13753 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13754 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13755 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13756 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13757 X(vst1, 0x0000000, 0x0800000, N_INV), \
13758 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13759 X(vst2, 0x0000100, 0x0800100, N_INV), \
13760 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13761 X(vst3, 0x0000200, 0x0800200, N_INV), \
13762 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13763 X(vst4, 0x0000300, 0x0800300, N_INV), \
13764 X(vmovn, 0x1b20200, N_INV, N_INV), \
13765 X(vtrn, 0x1b20080, N_INV, N_INV), \
13766 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13767 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13768 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13769 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13770 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13771 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13772 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13773 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13774 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13775 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13776 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13777 X(vseleq, 0xe000a00, N_INV, N_INV), \
13778 X(vselvs, 0xe100a00, N_INV, N_INV), \
13779 X(vselge, 0xe200a00, N_INV, N_INV), \
13780 X(vselgt, 0xe300a00, N_INV, N_INV), \
13781 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13782 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13783 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13784 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13785 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13786 X(aes, 0x3b00300, N_INV, N_INV), \
13787 X(sha3op, 0x2000c00, N_INV, N_INV), \
13788 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13789 X(sha2op, 0x3ba0380, N_INV, N_INV)
13793 #define X(OPC,I,F,S) N_MNEM_##OPC
13798 static const struct neon_tab_entry neon_enc_tab
[] =
13800 #define X(OPC,I,F,S) { (I), (F), (S) }
13805 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13806 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13807 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13808 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13809 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13810 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13811 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13812 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13813 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13814 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13815 #define NEON_ENC_SINGLE_(X) \
13816 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13817 #define NEON_ENC_DOUBLE_(X) \
13818 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13819 #define NEON_ENC_FPV8_(X) \
13820 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13822 #define NEON_ENCODE(type, inst) \
13825 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13826 inst.is_neon = 1; \
13830 #define check_neon_suffixes \
13833 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13835 as_bad (_("invalid neon suffix for non neon instruction")); \
13841 /* Define shapes for instruction operands. The following mnemonic characters
13842 are used in this table:
13844 F - VFP S<n> register
13845 D - Neon D<n> register
13846 Q - Neon Q<n> register
13850 L - D<n> register list
13852 This table is used to generate various data:
13853 - enumerations of the form NS_DDR to be used as arguments to
13855 - a table classifying shapes into single, double, quad, mixed.
13856 - a table used to drive neon_select_shape. */
13858 #define NEON_SHAPE_DEF \
13859 X(3, (D, D, D), DOUBLE), \
13860 X(3, (Q, Q, Q), QUAD), \
13861 X(3, (D, D, I), DOUBLE), \
13862 X(3, (Q, Q, I), QUAD), \
13863 X(3, (D, D, S), DOUBLE), \
13864 X(3, (Q, Q, S), QUAD), \
13865 X(2, (D, D), DOUBLE), \
13866 X(2, (Q, Q), QUAD), \
13867 X(2, (D, S), DOUBLE), \
13868 X(2, (Q, S), QUAD), \
13869 X(2, (D, R), DOUBLE), \
13870 X(2, (Q, R), QUAD), \
13871 X(2, (D, I), DOUBLE), \
13872 X(2, (Q, I), QUAD), \
13873 X(3, (D, L, D), DOUBLE), \
13874 X(2, (D, Q), MIXED), \
13875 X(2, (Q, D), MIXED), \
13876 X(3, (D, Q, I), MIXED), \
13877 X(3, (Q, D, I), MIXED), \
13878 X(3, (Q, D, D), MIXED), \
13879 X(3, (D, Q, Q), MIXED), \
13880 X(3, (Q, Q, D), MIXED), \
13881 X(3, (Q, D, S), MIXED), \
13882 X(3, (D, Q, S), MIXED), \
13883 X(4, (D, D, D, I), DOUBLE), \
13884 X(4, (Q, Q, Q, I), QUAD), \
13885 X(4, (D, D, S, I), DOUBLE), \
13886 X(4, (Q, Q, S, I), QUAD), \
13887 X(2, (F, F), SINGLE), \
13888 X(3, (F, F, F), SINGLE), \
13889 X(2, (F, I), SINGLE), \
13890 X(2, (F, D), MIXED), \
13891 X(2, (D, F), MIXED), \
13892 X(3, (F, F, I), MIXED), \
13893 X(4, (R, R, F, F), SINGLE), \
13894 X(4, (F, F, R, R), SINGLE), \
13895 X(3, (D, R, R), DOUBLE), \
13896 X(3, (R, R, D), DOUBLE), \
13897 X(2, (S, R), SINGLE), \
13898 X(2, (R, S), SINGLE), \
13899 X(2, (F, R), SINGLE), \
13900 X(2, (R, F), SINGLE), \
13901 /* Half float shape supported so far. */\
13902 X (2, (H, D), MIXED), \
13903 X (2, (D, H), MIXED), \
13904 X (2, (H, F), MIXED), \
13905 X (2, (F, H), MIXED), \
13906 X (2, (H, H), HALF), \
13907 X (2, (H, R), HALF), \
13908 X (2, (R, H), HALF), \
13909 X (2, (H, I), HALF), \
13910 X (3, (H, H, H), HALF), \
13911 X (3, (H, F, I), MIXED), \
13912 X (3, (F, H, I), MIXED), \
13913 X (3, (D, H, H), MIXED), \
13914 X (3, (D, H, S), MIXED)
13916 #define S2(A,B) NS_##A##B
13917 #define S3(A,B,C) NS_##A##B##C
13918 #define S4(A,B,C,D) NS_##A##B##C##D
13920 #define X(N, L, C) S##N L
13933 enum neon_shape_class
13942 #define X(N, L, C) SC_##C
13944 static enum neon_shape_class neon_shape_class
[] =
13963 /* Register widths of above. */
13964 static unsigned neon_shape_el_size
[] =
13976 struct neon_shape_info
13979 enum neon_shape_el el
[NEON_MAX_TYPE_ELS
];
13982 #define S2(A,B) { SE_##A, SE_##B }
13983 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13984 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13986 #define X(N, L, C) { N, S##N L }
13988 static struct neon_shape_info neon_shape_tab
[] =
13998 /* Bit masks used in type checking given instructions.
13999 'N_EQK' means the type must be the same as (or based on in some way) the key
14000 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14001 set, various other bits can be set as well in order to modify the meaning of
14002 the type constraint. */
14004 enum neon_type_mask
14028 N_KEY
= 0x1000000, /* Key element (main type specifier). */
14029 N_EQK
= 0x2000000, /* Given operand has the same type & size as the key. */
14030 N_VFP
= 0x4000000, /* VFP mode: operand size must match register width. */
14031 N_UNT
= 0x8000000, /* Must be explicitly untyped. */
14032 N_DBL
= 0x0000001, /* If N_EQK, this operand is twice the size. */
14033 N_HLF
= 0x0000002, /* If N_EQK, this operand is half the size. */
14034 N_SGN
= 0x0000004, /* If N_EQK, this operand is forced to be signed. */
14035 N_UNS
= 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
14036 N_INT
= 0x0000010, /* If N_EQK, this operand is forced to be integer. */
14037 N_FLT
= 0x0000020, /* If N_EQK, this operand is forced to be float. */
14038 N_SIZ
= 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
14040 N_MAX_NONSPECIAL
= N_P64
14043 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
14045 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
14046 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
14047 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
14048 #define N_S_32 (N_S8 | N_S16 | N_S32)
14049 #define N_F_16_32 (N_F16 | N_F32)
14050 #define N_SUF_32 (N_SU_32 | N_F_16_32)
14051 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
14052 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
14053 #define N_F_ALL (N_F16 | N_F32 | N_F64)
14055 /* Pass this as the first type argument to neon_check_type to ignore types
14057 #define N_IGNORE_TYPE (N_KEY | N_EQK)
14059 /* Select a "shape" for the current instruction (describing register types or
14060 sizes) from a list of alternatives. Return NS_NULL if the current instruction
14061 doesn't fit. For non-polymorphic shapes, checking is usually done as a
14062 function of operand parsing, so this function doesn't need to be called.
14063 Shapes should be listed in order of decreasing length. */
14065 static enum neon_shape
14066 neon_select_shape (enum neon_shape shape
, ...)
14069 enum neon_shape first_shape
= shape
;
14071 /* Fix missing optional operands. FIXME: we don't know at this point how
14072 many arguments we should have, so this makes the assumption that we have
14073 > 1. This is true of all current Neon opcodes, I think, but may not be
14074 true in the future. */
14075 if (!inst
.operands
[1].present
)
14076 inst
.operands
[1] = inst
.operands
[0];
14078 va_start (ap
, shape
);
14080 for (; shape
!= NS_NULL
; shape
= (enum neon_shape
) va_arg (ap
, int))
14085 for (j
= 0; j
< neon_shape_tab
[shape
].els
; j
++)
14087 if (!inst
.operands
[j
].present
)
14093 switch (neon_shape_tab
[shape
].el
[j
])
14095 /* If a .f16, .16, .u16, .s16 type specifier is given over
14096 a VFP single precision register operand, it's essentially
14097 means only half of the register is used.
14099 If the type specifier is given after the mnemonics, the
14100 information is stored in inst.vectype. If the type specifier
14101 is given after register operand, the information is stored
14102 in inst.operands[].vectype.
14104 When there is only one type specifier, and all the register
14105 operands are the same type of hardware register, the type
14106 specifier applies to all register operands.
14108 If no type specifier is given, the shape is inferred from
14109 operand information.
14112 vadd.f16 s0, s1, s2: NS_HHH
14113 vabs.f16 s0, s1: NS_HH
14114 vmov.f16 s0, r1: NS_HR
14115 vmov.f16 r0, s1: NS_RH
14116 vcvt.f16 r0, s1: NS_RH
14117 vcvt.f16.s32 s2, s2, #29: NS_HFI
14118 vcvt.f16.s32 s2, s2: NS_HF
14121 if (!(inst
.operands
[j
].isreg
14122 && inst
.operands
[j
].isvec
14123 && inst
.operands
[j
].issingle
14124 && !inst
.operands
[j
].isquad
14125 && ((inst
.vectype
.elems
== 1
14126 && inst
.vectype
.el
[0].size
== 16)
14127 || (inst
.vectype
.elems
> 1
14128 && inst
.vectype
.el
[j
].size
== 16)
14129 || (inst
.vectype
.elems
== 0
14130 && inst
.operands
[j
].vectype
.type
!= NT_invtype
14131 && inst
.operands
[j
].vectype
.size
== 16))))
14136 if (!(inst
.operands
[j
].isreg
14137 && inst
.operands
[j
].isvec
14138 && inst
.operands
[j
].issingle
14139 && !inst
.operands
[j
].isquad
14140 && ((inst
.vectype
.elems
== 1 && inst
.vectype
.el
[0].size
== 32)
14141 || (inst
.vectype
.elems
> 1 && inst
.vectype
.el
[j
].size
== 32)
14142 || (inst
.vectype
.elems
== 0
14143 && (inst
.operands
[j
].vectype
.size
== 32
14144 || inst
.operands
[j
].vectype
.type
== NT_invtype
)))))
14149 if (!(inst
.operands
[j
].isreg
14150 && inst
.operands
[j
].isvec
14151 && !inst
.operands
[j
].isquad
14152 && !inst
.operands
[j
].issingle
))
14157 if (!(inst
.operands
[j
].isreg
14158 && !inst
.operands
[j
].isvec
))
14163 if (!(inst
.operands
[j
].isreg
14164 && inst
.operands
[j
].isvec
14165 && inst
.operands
[j
].isquad
14166 && !inst
.operands
[j
].issingle
))
14171 if (!(!inst
.operands
[j
].isreg
14172 && !inst
.operands
[j
].isscalar
))
14177 if (!(!inst
.operands
[j
].isreg
14178 && inst
.operands
[j
].isscalar
))
14188 if (matches
&& (j
>= ARM_IT_MAX_OPERANDS
|| !inst
.operands
[j
].present
))
14189 /* We've matched all the entries in the shape table, and we don't
14190 have any left over operands which have not been matched. */
14196 if (shape
== NS_NULL
&& first_shape
!= NS_NULL
)
14197 first_error (_("invalid instruction shape"));
14202 /* True if SHAPE is predominantly a quadword operation (most of the time, this
14203 means the Q bit should be set). */
14206 neon_quad (enum neon_shape shape
)
14208 return neon_shape_class
[shape
] == SC_QUAD
;
14212 neon_modify_type_size (unsigned typebits
, enum neon_el_type
*g_type
,
14215 /* Allow modification to be made to types which are constrained to be
14216 based on the key element, based on bits set alongside N_EQK. */
14217 if ((typebits
& N_EQK
) != 0)
14219 if ((typebits
& N_HLF
) != 0)
14221 else if ((typebits
& N_DBL
) != 0)
14223 if ((typebits
& N_SGN
) != 0)
14224 *g_type
= NT_signed
;
14225 else if ((typebits
& N_UNS
) != 0)
14226 *g_type
= NT_unsigned
;
14227 else if ((typebits
& N_INT
) != 0)
14228 *g_type
= NT_integer
;
14229 else if ((typebits
& N_FLT
) != 0)
14230 *g_type
= NT_float
;
14231 else if ((typebits
& N_SIZ
) != 0)
14232 *g_type
= NT_untyped
;
14236 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
14237 operand type, i.e. the single type specified in a Neon instruction when it
14238 is the only one given. */
14240 static struct neon_type_el
14241 neon_type_promote (struct neon_type_el
*key
, unsigned thisarg
)
14243 struct neon_type_el dest
= *key
;
14245 gas_assert ((thisarg
& N_EQK
) != 0);
14247 neon_modify_type_size (thisarg
, &dest
.type
, &dest
.size
);
14252 /* Convert Neon type and size into compact bitmask representation. */
14254 static enum neon_type_mask
14255 type_chk_of_el_type (enum neon_el_type type
, unsigned size
)
14262 case 8: return N_8
;
14263 case 16: return N_16
;
14264 case 32: return N_32
;
14265 case 64: return N_64
;
14273 case 8: return N_I8
;
14274 case 16: return N_I16
;
14275 case 32: return N_I32
;
14276 case 64: return N_I64
;
14284 case 16: return N_F16
;
14285 case 32: return N_F32
;
14286 case 64: return N_F64
;
14294 case 8: return N_P8
;
14295 case 16: return N_P16
;
14296 case 64: return N_P64
;
14304 case 8: return N_S8
;
14305 case 16: return N_S16
;
14306 case 32: return N_S32
;
14307 case 64: return N_S64
;
14315 case 8: return N_U8
;
14316 case 16: return N_U16
;
14317 case 32: return N_U32
;
14318 case 64: return N_U64
;
14329 /* Convert compact Neon bitmask type representation to a type and size. Only
14330 handles the case where a single bit is set in the mask. */
14333 el_type_of_type_chk (enum neon_el_type
*type
, unsigned *size
,
14334 enum neon_type_mask mask
)
14336 if ((mask
& N_EQK
) != 0)
14339 if ((mask
& (N_S8
| N_U8
| N_I8
| N_8
| N_P8
)) != 0)
14341 else if ((mask
& (N_S16
| N_U16
| N_I16
| N_16
| N_F16
| N_P16
)) != 0)
14343 else if ((mask
& (N_S32
| N_U32
| N_I32
| N_32
| N_F32
)) != 0)
14345 else if ((mask
& (N_S64
| N_U64
| N_I64
| N_64
| N_F64
| N_P64
)) != 0)
14350 if ((mask
& (N_S8
| N_S16
| N_S32
| N_S64
)) != 0)
14352 else if ((mask
& (N_U8
| N_U16
| N_U32
| N_U64
)) != 0)
14353 *type
= NT_unsigned
;
14354 else if ((mask
& (N_I8
| N_I16
| N_I32
| N_I64
)) != 0)
14355 *type
= NT_integer
;
14356 else if ((mask
& (N_8
| N_16
| N_32
| N_64
)) != 0)
14357 *type
= NT_untyped
;
14358 else if ((mask
& (N_P8
| N_P16
| N_P64
)) != 0)
14360 else if ((mask
& (N_F_ALL
)) != 0)
14368 /* Modify a bitmask of allowed types. This is only needed for type
14372 modify_types_allowed (unsigned allowed
, unsigned mods
)
14375 enum neon_el_type type
;
14381 for (i
= 1; i
<= N_MAX_NONSPECIAL
; i
<<= 1)
14383 if (el_type_of_type_chk (&type
, &size
,
14384 (enum neon_type_mask
) (allowed
& i
)) == SUCCESS
)
14386 neon_modify_type_size (mods
, &type
, &size
);
14387 destmask
|= type_chk_of_el_type (type
, size
);
14394 /* Check type and return type classification.
14395 The manual states (paraphrase): If one datatype is given, it indicates the
14397 - the second operand, if there is one
14398 - the operand, if there is no second operand
14399 - the result, if there are no operands.
14400 This isn't quite good enough though, so we use a concept of a "key" datatype
14401 which is set on a per-instruction basis, which is the one which matters when
14402 only one data type is written.
14403 Note: this function has side-effects (e.g. filling in missing operands). All
14404 Neon instructions should call it before performing bit encoding. */
14406 static struct neon_type_el
14407 neon_check_type (unsigned els
, enum neon_shape ns
, ...)
14410 unsigned i
, pass
, key_el
= 0;
14411 unsigned types
[NEON_MAX_TYPE_ELS
];
14412 enum neon_el_type k_type
= NT_invtype
;
14413 unsigned k_size
= -1u;
14414 struct neon_type_el badtype
= {NT_invtype
, -1};
14415 unsigned key_allowed
= 0;
14417 /* Optional registers in Neon instructions are always (not) in operand 1.
14418 Fill in the missing operand here, if it was omitted. */
14419 if (els
> 1 && !inst
.operands
[1].present
)
14420 inst
.operands
[1] = inst
.operands
[0];
14422 /* Suck up all the varargs. */
14424 for (i
= 0; i
< els
; i
++)
14426 unsigned thisarg
= va_arg (ap
, unsigned);
14427 if (thisarg
== N_IGNORE_TYPE
)
14432 types
[i
] = thisarg
;
14433 if ((thisarg
& N_KEY
) != 0)
14438 if (inst
.vectype
.elems
> 0)
14439 for (i
= 0; i
< els
; i
++)
14440 if (inst
.operands
[i
].vectype
.type
!= NT_invtype
)
14442 first_error (_("types specified in both the mnemonic and operands"));
14446 /* Duplicate inst.vectype elements here as necessary.
14447 FIXME: No idea if this is exactly the same as the ARM assembler,
14448 particularly when an insn takes one register and one non-register
14450 if (inst
.vectype
.elems
== 1 && els
> 1)
14453 inst
.vectype
.elems
= els
;
14454 inst
.vectype
.el
[key_el
] = inst
.vectype
.el
[0];
14455 for (j
= 0; j
< els
; j
++)
14457 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14460 else if (inst
.vectype
.elems
== 0 && els
> 0)
14463 /* No types were given after the mnemonic, so look for types specified
14464 after each operand. We allow some flexibility here; as long as the
14465 "key" operand has a type, we can infer the others. */
14466 for (j
= 0; j
< els
; j
++)
14467 if (inst
.operands
[j
].vectype
.type
!= NT_invtype
)
14468 inst
.vectype
.el
[j
] = inst
.operands
[j
].vectype
;
14470 if (inst
.operands
[key_el
].vectype
.type
!= NT_invtype
)
14472 for (j
= 0; j
< els
; j
++)
14473 if (inst
.operands
[j
].vectype
.type
== NT_invtype
)
14474 inst
.vectype
.el
[j
] = neon_type_promote (&inst
.vectype
.el
[key_el
],
14479 first_error (_("operand types can't be inferred"));
14483 else if (inst
.vectype
.elems
!= els
)
14485 first_error (_("type specifier has the wrong number of parts"));
14489 for (pass
= 0; pass
< 2; pass
++)
14491 for (i
= 0; i
< els
; i
++)
14493 unsigned thisarg
= types
[i
];
14494 unsigned types_allowed
= ((thisarg
& N_EQK
) != 0 && pass
!= 0)
14495 ? modify_types_allowed (key_allowed
, thisarg
) : thisarg
;
14496 enum neon_el_type g_type
= inst
.vectype
.el
[i
].type
;
14497 unsigned g_size
= inst
.vectype
.el
[i
].size
;
14499 /* Decay more-specific signed & unsigned types to sign-insensitive
14500 integer types if sign-specific variants are unavailable. */
14501 if ((g_type
== NT_signed
|| g_type
== NT_unsigned
)
14502 && (types_allowed
& N_SU_ALL
) == 0)
14503 g_type
= NT_integer
;
14505 /* If only untyped args are allowed, decay any more specific types to
14506 them. Some instructions only care about signs for some element
14507 sizes, so handle that properly. */
14508 if (((types_allowed
& N_UNT
) == 0)
14509 && ((g_size
== 8 && (types_allowed
& N_8
) != 0)
14510 || (g_size
== 16 && (types_allowed
& N_16
) != 0)
14511 || (g_size
== 32 && (types_allowed
& N_32
) != 0)
14512 || (g_size
== 64 && (types_allowed
& N_64
) != 0)))
14513 g_type
= NT_untyped
;
14517 if ((thisarg
& N_KEY
) != 0)
14521 key_allowed
= thisarg
& ~N_KEY
;
14523 /* Check architecture constraint on FP16 extension. */
14525 && k_type
== NT_float
14526 && ! ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14528 inst
.error
= _(BAD_FP16
);
14535 if ((thisarg
& N_VFP
) != 0)
14537 enum neon_shape_el regshape
;
14538 unsigned regwidth
, match
;
14540 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14543 first_error (_("invalid instruction shape"));
14546 regshape
= neon_shape_tab
[ns
].el
[i
];
14547 regwidth
= neon_shape_el_size
[regshape
];
14549 /* In VFP mode, operands must match register widths. If we
14550 have a key operand, use its width, else use the width of
14551 the current operand. */
14557 /* FP16 will use a single precision register. */
14558 if (regwidth
== 32 && match
== 16)
14560 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
))
14564 inst
.error
= _(BAD_FP16
);
14569 if (regwidth
!= match
)
14571 first_error (_("operand size must match register width"));
14576 if ((thisarg
& N_EQK
) == 0)
14578 unsigned given_type
= type_chk_of_el_type (g_type
, g_size
);
14580 if ((given_type
& types_allowed
) == 0)
14582 first_error (_("bad type in Neon instruction"));
14588 enum neon_el_type mod_k_type
= k_type
;
14589 unsigned mod_k_size
= k_size
;
14590 neon_modify_type_size (thisarg
, &mod_k_type
, &mod_k_size
);
14591 if (g_type
!= mod_k_type
|| g_size
!= mod_k_size
)
14593 first_error (_("inconsistent types in Neon instruction"));
14601 return inst
.vectype
.el
[key_el
];
14604 /* Neon-style VFP instruction forwarding. */
14606 /* Thumb VFP instructions have 0xE in the condition field. */
14609 do_vfp_cond_or_thumb (void)
14614 inst
.instruction
|= 0xe0000000;
14616 inst
.instruction
|= inst
.cond
<< 28;
14619 /* Look up and encode a simple mnemonic, for use as a helper function for the
14620 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14621 etc. It is assumed that operand parsing has already been done, and that the
14622 operands are in the form expected by the given opcode (this isn't necessarily
14623 the same as the form in which they were parsed, hence some massaging must
14624 take place before this function is called).
14625 Checks current arch version against that in the looked-up opcode. */
14628 do_vfp_nsyn_opcode (const char *opname
)
14630 const struct asm_opcode
*opcode
;
14632 opcode
= (const struct asm_opcode
*) hash_find (arm_ops_hsh
, opname
);
14637 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
,
14638 thumb_mode
? *opcode
->tvariant
: *opcode
->avariant
),
14645 inst
.instruction
= opcode
->tvalue
;
14646 opcode
->tencode ();
14650 inst
.instruction
= (inst
.cond
<< 28) | opcode
->avalue
;
14651 opcode
->aencode ();
14656 do_vfp_nsyn_add_sub (enum neon_shape rs
)
14658 int is_add
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vadd
;
14660 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14663 do_vfp_nsyn_opcode ("fadds");
14665 do_vfp_nsyn_opcode ("fsubs");
14667 /* ARMv8.2 fp16 instruction. */
14669 do_scalar_fp16_v82_encode ();
14674 do_vfp_nsyn_opcode ("faddd");
14676 do_vfp_nsyn_opcode ("fsubd");
14680 /* Check operand types to see if this is a VFP instruction, and if so call
14684 try_vfp_nsyn (int args
, void (*pfn
) (enum neon_shape
))
14686 enum neon_shape rs
;
14687 struct neon_type_el et
;
14692 rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14693 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14697 rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14698 et
= neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14699 N_F_ALL
| N_KEY
| N_VFP
);
14706 if (et
.type
!= NT_invtype
)
14717 do_vfp_nsyn_mla_mls (enum neon_shape rs
)
14719 int is_mla
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vmla
;
14721 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14724 do_vfp_nsyn_opcode ("fmacs");
14726 do_vfp_nsyn_opcode ("fnmacs");
14728 /* ARMv8.2 fp16 instruction. */
14730 do_scalar_fp16_v82_encode ();
14735 do_vfp_nsyn_opcode ("fmacd");
14737 do_vfp_nsyn_opcode ("fnmacd");
14742 do_vfp_nsyn_fma_fms (enum neon_shape rs
)
14744 int is_fma
= (inst
.instruction
& 0x0fffffff) == N_MNEM_vfma
;
14746 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14749 do_vfp_nsyn_opcode ("ffmas");
14751 do_vfp_nsyn_opcode ("ffnmas");
14753 /* ARMv8.2 fp16 instruction. */
14755 do_scalar_fp16_v82_encode ();
14760 do_vfp_nsyn_opcode ("ffmad");
14762 do_vfp_nsyn_opcode ("ffnmad");
14767 do_vfp_nsyn_mul (enum neon_shape rs
)
14769 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14771 do_vfp_nsyn_opcode ("fmuls");
14773 /* ARMv8.2 fp16 instruction. */
14775 do_scalar_fp16_v82_encode ();
14778 do_vfp_nsyn_opcode ("fmuld");
14782 do_vfp_nsyn_abs_neg (enum neon_shape rs
)
14784 int is_neg
= (inst
.instruction
& 0x80) != 0;
14785 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_VFP
| N_KEY
);
14787 if (rs
== NS_FF
|| rs
== NS_HH
)
14790 do_vfp_nsyn_opcode ("fnegs");
14792 do_vfp_nsyn_opcode ("fabss");
14794 /* ARMv8.2 fp16 instruction. */
14796 do_scalar_fp16_v82_encode ();
14801 do_vfp_nsyn_opcode ("fnegd");
14803 do_vfp_nsyn_opcode ("fabsd");
14807 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14808 insns belong to Neon, and are handled elsewhere. */
14811 do_vfp_nsyn_ldm_stm (int is_dbmode
)
14813 int is_ldm
= (inst
.instruction
& (1 << 20)) != 0;
14817 do_vfp_nsyn_opcode ("fldmdbs");
14819 do_vfp_nsyn_opcode ("fldmias");
14824 do_vfp_nsyn_opcode ("fstmdbs");
14826 do_vfp_nsyn_opcode ("fstmias");
14831 do_vfp_nsyn_sqrt (void)
14833 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_NULL
);
14834 neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
| N_VFP
);
14836 if (rs
== NS_FF
|| rs
== NS_HH
)
14838 do_vfp_nsyn_opcode ("fsqrts");
14840 /* ARMv8.2 fp16 instruction. */
14842 do_scalar_fp16_v82_encode ();
14845 do_vfp_nsyn_opcode ("fsqrtd");
14849 do_vfp_nsyn_div (void)
14851 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14852 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14853 N_F_ALL
| N_KEY
| N_VFP
);
14855 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14857 do_vfp_nsyn_opcode ("fdivs");
14859 /* ARMv8.2 fp16 instruction. */
14861 do_scalar_fp16_v82_encode ();
14864 do_vfp_nsyn_opcode ("fdivd");
14868 do_vfp_nsyn_nmul (void)
14870 enum neon_shape rs
= neon_select_shape (NS_HHH
, NS_FFF
, NS_DDD
, NS_NULL
);
14871 neon_check_type (3, rs
, N_EQK
| N_VFP
, N_EQK
| N_VFP
,
14872 N_F_ALL
| N_KEY
| N_VFP
);
14874 if (rs
== NS_FFF
|| rs
== NS_HHH
)
14876 NEON_ENCODE (SINGLE
, inst
);
14877 do_vfp_sp_dyadic ();
14879 /* ARMv8.2 fp16 instruction. */
14881 do_scalar_fp16_v82_encode ();
14885 NEON_ENCODE (DOUBLE
, inst
);
14886 do_vfp_dp_rd_rn_rm ();
14888 do_vfp_cond_or_thumb ();
/* Encode a VFP-syntax VCMP/VCMPE.  Register-register form compares two
   registers; register-immediate form compares against #0 (the vcmpz
   pseudo-mnemonics).  NOTE(review): switch labels and the zero-compare
   double-precision helper call were lost in extraction and are reconstructed
   from upstream binutils — verify against the repository.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (inst.operands[1].isreg)
    {
      /* Register-register comparison.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Comparison against #0: rewrite the opcode to the "z" variant.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
/* Shift operand 0 into slot 1 and synthesize a writeback SP operand in
   slot 0, so that VPUSH/VPOP can be encoded as FSTMDB/FLDMIA with SP!.  */

static void
nsyn_insert_sp (void)
{
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
  inst.operands[0].reg = REG_SP;
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].present = 1;
}
/* Encode VPUSH as a store-multiple-decrement-before with SP writeback.  */

static void
do_vfp_nsyn_push (void)
{
  nsyn_insert_sp ();

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fstmdbs");
  else
    do_vfp_nsyn_opcode ("fstmdbd");
}
/* Encode VPOP as a load-multiple-increment-after with SP writeback.  */

static void
do_vfp_nsyn_pop (void)
{
  nsyn_insert_sp ();

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  if (inst.operands[1].issingle)
    do_vfp_nsyn_opcode ("fldmias");
  else
    do_vfp_nsyn_opcode ("fldmiad");
}
/* Fix up Neon data-processing instructions, ORing in the correct bits for
   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.
   NOTE(review): the thumb_mode branch body was lost in extraction and is
   reconstructed from upstream binutils — verify against the repository.  */

static void
neon_dp_fixup (struct arm_it* insn)
{
  unsigned int i = insn->instruction;
  insn->is_neon = 1;

  if (thumb_mode)
    {
      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
      if (i & (1 << 24))
	i |= 1 << 28;

      i &= ~(1 << 24);

      i |= 0xef000000;
    }
  else
    i |= 0xf2000000;

  insn->instruction = i;
}
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the least significant set bit, so a
     power of two (1 << n) yields n + 1; subtracting 4 maps the element
     sizes 8/16/32/64 onto the encodings 0/1/2/3.  */
  int lowest_set_bit = ffs (x);
  return lowest_set_bit - 4;
}
/* Neon register numbers are 5 bits: LOW4 extracts the 4 low bits and HI1
   the top bit, which are encoded in separate instruction fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
/* Encode insns with bit pattern:

  |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |

  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
  different meaning for some instruction.  */

static void
neon_three_same (int isquad, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Rd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;  /* Rn.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;    /* N.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);        /* Rm.  */
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;    /* M.  */
  inst.instruction |= (isquad != 0) << 6;                 /* Q.  */
  inst.instruction |= (ubit != 0) << 24;                  /* U.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
/* Encode instructions of the form:

  |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
  |  U  |x |D |x  x |size |x x  | Rd  |x x x x x|Q|M|x| Rm |

  Don't write size if SIZE == -1.  */

static void
neon_two_same (int qbit, int ubit, int size)
{
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;  /* Rd.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;   /* D.  */
  inst.instruction |= LOW4 (inst.operands[1].reg);        /* Rm.  */
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;    /* M.  */
  inst.instruction |= (qbit != 0) << 6;                   /* Q.  */
  inst.instruction |= (ubit != 0) << 24;                  /* U.  */
  if (size != -1)
    inst.instruction |= neon_logbits (size) << 18;

  neon_dp_fixup (&inst);
}
15073 /* Neon instruction encoders, in approximate order of appearance. */
/* Three-register integer ops whose U bit selects signed/unsigned, for
   8/16/32-bit element types.  */

static void
do_neon_dyadic_i_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_32 | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
/* As do_neon_dyadic_i_su, but also allowing 64-bit element types.  */

static void
do_neon_dyadic_i64_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
/* Encode an immediate-shift instruction.  IMMBITS is the raw immediate
   field; the element size is folded into the L bit (bit 7) and the high
   bits of the imm6 field (bits 19..21).  The U bit is only written when
   WRITE_UBIT is set, since some shift encodings reuse bit 24.  */

static void
neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
		unsigned immbits)
{
  unsigned size = et.size >> 3;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= (isquad != 0) << 6;
  inst.instruction |= immbits << 16;
  inst.instruction |= (size >> 3) << 7;
  inst.instruction |= (size & 0x7) << 19;
  if (write_ubit)
    inst.instruction |= (uval != 0) << 24;

  neon_dp_fixup (&inst);
}
/* Encode VSHL: immediate form shifts by a constant; register form is a
   three-same encoding with Dn/Dm swapped relative to other dyadic ops.  */

static void
do_neon_shl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate shift.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register shift.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* VSHL/VQSHL 3-register variants have syntax such as:
	   vshl.xx Dd, Dm, Dn
	 whereas other 3-register operations encoded by neon_three_same have
	 syntax like:
	   vadd.xx Dd, Dn, Dm
	 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
	 here.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
/* Encode VQSHL (saturating shift).  Same two forms as do_neon_shl_imm,
   but the U bit reflects the signedness of the type.  */

static void
do_neon_qshl_imm (void)
{
  if (!inst.operands[2].isreg)
    {
      /* Immediate shift.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
      int imm = inst.operands[2].imm;

      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register shift.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
      unsigned int tmp;

      /* See note in do_neon_shl_imm.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
/* Encode VRSHL (rounding shift by register).  Operands 1 and 2 are swapped
   for the same syntax reason as VSHL — see note in do_neon_shl_imm.  */

static void
do_neon_rshl (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_SU_ALL | N_KEY);
  unsigned int tmp;

  tmp = inst.operands[2].reg;
  inst.operands[2].reg = inst.operands[1].reg;
  inst.operands[1].reg = tmp;
  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
}
/* Compute the cmode field for an immediate logic operation (VBIC/VORR
   immediate forms), storing the 8-bit immediate payload in *IMMBITS.
   Returns FAIL if the immediate cannot be represented.
   NOTE(review): the size checks, return values and bad-immediate label were
   lost in extraction and are reconstructed from upstream binutils — verify
   against the repository.  */

static int
neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
{
  /* Handle .I8 pseudo-instructions.  */
  if (size == 8)
    {
      /* Unfortunately, this will make everything apart from zero out-of-range.
	 FIXME is this the intended semantics? There doesn't seem much point in
	 accepting .I8 if so.  */
      immediate |= immediate << 8;
      size = 16;
    }

  if (size >= 32)
    {
      if (immediate == (immediate & 0x000000ff))
	{
	  *immbits = immediate;
	  return 0x1;
	}
      else if (immediate == (immediate & 0x0000ff00))
	{
	  *immbits = immediate >> 8;
	  return 0x3;
	}
      else if (immediate == (immediate & 0x00ff0000))
	{
	  *immbits = immediate >> 16;
	  return 0x5;
	}
      else if (immediate == (immediate & 0xff000000))
	{
	  *immbits = immediate >> 24;
	  return 0x7;
	}
      if ((immediate & 0xffff) != (immediate >> 16))
	goto bad_immediate;
      immediate &= 0xffff;
    }

  if (immediate == (immediate & 0x000000ff))
    {
      *immbits = immediate;
      return 0x9;
    }
  else if (immediate == (immediate & 0x0000ff00))
    {
      *immbits = immediate >> 8;
      return 0xb;
    }

  bad_immediate:
  first_error (_("immediate value out of range"));
  return FAIL;
}
/* Encode Neon bitwise logic ops (VAND/VBIC/VORR/VORN/VEOR).  Register form
   is a plain three-same encoding; immediate form computes a cmode/immbits
   pair, with VAND/VORN implemented as pseudo-instructions of VBIC/VORR by
   inverting the immediate.  NOTE(review): the switch labels and early
   returns were lost in extraction and are reconstructed from upstream
   binutils — verify against the repository.  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      struct neon_type_el et = neon_check_type (2, rs,
	N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;

      if (et.type == NT_invtype)
	return;

      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
/* Encode bitfield ops (VBSL/VBIT/VBIF): untyped three-same encodings.  */

static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
/* Shared encoder for dyadic ops that accept both integer and float types.
   UBIT_MEANING says which element type sets the U bit in the integer case;
   TYPES is the accepted key-type mask; DESTBITS extra bits for operand 0.  */

static void
neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
		  unsigned destbits)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
					    types | N_KEY);
  if (et.type == NT_float)
    {
      NEON_ENCODE (FLOAT, inst);
      /* fp16 variants need the size field written; fp32 leaves it alone.  */
      neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
    }
  else
    {
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
    }
}
/* Dyadic op over signed/unsigned/float 32-bit-and-below element types.  */

static void
do_neon_dyadic_if_su (void)
{
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
/* Flag bits for vfp_or_neon_is_neon, selecting which checks to perform.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,     /* Reject/fix up condition codes.  */
  NEON_CHECK_ARCH = 2,   /* Require base Neon (v1).  */
  NEON_CHECK_ARCH8 = 4   /* Require ARMv8 Neon.  */
};
/* Call this function if an instruction which may have belonged to the VFP or
   Neon instruction sets, but turned out to be a Neon instruction (due to the
   operand types involved, etc.). We have to check and/or fix-up a couple of
   things:

     - Make sure the user hasn't attempted to make a Neon instruction
       conditional.
     - Alter the value in the condition code field if necessary.
     - Make sure that the arch supports Neon instructions.

   Which of these operations take place depends on bits from enum
   vfp_or_neon_is_neon_bits.

   WARNING: This function has side effects! If NEON_CHECK_CC is used and the
   current instruction's condition is COND_ALWAYS, the condition field is
   changed to inst.uncond_value. This is necessary because instructions shared
   between VFP and Neon may be conditional for the VFP variants only, and the
   unconditional Neon version must have, e.g., 0xF in the condition field.  */

static int
vfp_or_neon_is_neon (unsigned check)
{
  /* Conditions are always legal in Thumb mode (IT blocks).  */
  if (!thumb_mode && (check & NEON_CHECK_CC))
    {
      if (inst.cond != COND_ALWAYS)
	{
	  first_error (_(BAD_COND));
	  return FAIL;
	}
      if (inst.uncond_value != -1)
	inst.instruction |= inst.uncond_value << 28;
    }

  if ((check & NEON_CHECK_ARCH)
      && !mark_feature_used (&fpu_neon_ext_v1))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  if ((check & NEON_CHECK_ARCH8)
      && !mark_feature_used (&fpu_neon_ext_armv8))
    {
      first_error (_(BAD_FPU));
      return FAIL;
    }

  return SUCCESS;
}
/* Encode VADD/VSUB, falling back to the VFP encoding when the operands are
   VFP-style.  */

static void
do_neon_addsub_if_i (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
   result to be:
     V<op> A,B      (A is operand 0, B is operand 2)
   to mean:
     V<op> A,B,A
   not:
     V<op> A,B,B
   so handle that case specially.  */

static void
neon_exchange_operands (void)
{
  if (inst.operands[1].present)
    {
      void *scratch = xmalloc (sizeof (inst.operands[0]));

      /* Swap operands[1] and operands[2].  */
      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
      inst.operands[1] = inst.operands[2];
      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
      free (scratch);
    }
  else
    {
      inst.operands[1] = inst.operands[2];
      inst.operands[2] = inst.operands[0];
    }
}
/* Shared encoder for comparison instructions.  Register-register form goes
   through the dyadic encoder (operands exchanged for the inverted
   comparisons); compare-against-zero form is encoded directly here.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
						N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
/* NOTE(review): signature lost in extraction; reconstructed per upstream.  */
static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
/* Inverted comparison (e.g. VCLT as VCGT with swapped operands).  */
static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
/* NOTE(review): signature lost in extraction; reconstructed per upstream.  */
static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.

   Dot Product instructions are similar to multiply instructions except elsize
   should always be 32.

   This function translates SCALAR, which is GAS's internal encoding of indexed
   scalar register, to raw encoding. There is also register and index range
   check based on ELSIZE.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    bad_scalar:
    default:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
/* Encode multiply / multiply-accumulate scalar instructions.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
/* Encode VMLA/VMLS, which may take a scalar as the third operand; falls
   back to VFP encodings when applicable.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
/* Encode VFMA/VFMS; falls back to VFP encodings when applicable.  */

static void
do_neon_fmac (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
/* NOTE(review): signature lost in extraction; reconstructed per upstream
   (this is do_neon_tst).  */
static void
do_neon_tst (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size);
}
/* VMUL with 3 registers allows the P8 type. The scalar version supports the
   same types as the MAC equivalents. The polynomial type for this instruction
   is encoded the same as the integer type.  */

static void
do_neon_mul (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar ();
  else
    neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
}
/* Encode VQDMULH/VQRDMULH, scalar or three-register form.  */

static void
do_neon_qdmulh (void)
{
  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
/* Encode VQRDMLAH/VQRDMLSH (ARMv8.1 AdvSIMD), scalar or register form.  */

static void
do_neon_qrdmlah (void)
{
  /* Check we're on the correct architecture.  */
  if (!mark_feature_used (&fpu_neon_ext_armv8))
    inst.error =
      _("instruction form not available on this architecture.");
  else if (!mark_feature_used (&fpu_neon_ext_v8_1))
    {
      as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
      record_feature_use (&fpu_neon_ext_v8_1);
    }

  if (inst.operands[2].isscalar)
    {
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      /* The U bit (rounding) comes from bit mask.  */
      neon_three_same (neon_quad (rs), 0, et.size);
    }
}
/* Encode VACGE/VACGT (absolute compare): float only, U bit always set.  */

static void
do_neon_fcmp_absolute (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  /* Size field comes from bit mask.  */
  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
}
/* VACLE/VACLT: the same encoding with the operands exchanged.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
/* Encode VRECPS/VRSQRTS (Newton-Raphson step): float only.  */

static void
do_neon_step (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
					    N_F_16_32 | N_KEY);
  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
}
/* Encode VABS/VNEG, falling back to the VFP encoding when applicable.  */

static void
do_neon_abs_neg (void)
{
  enum neon_shape rs;
  struct neon_type_el et;

  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
    return;

  if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
    return;

  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= (et.type == NT_float) << 10;
  inst.instruction |= neon_logbits (et.size) << 18;

  neon_dp_fixup (&inst);
}
/* NOTE(review): signature lost in extraction; reconstructed per upstream
   (this is do_neon_sli — shift left and insert).  */
static void
do_neon_sli (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
/* NOTE(review): signature lost in extraction; reconstructed per upstream
   (this is do_neon_sri — shift right and insert; the encoded field is
   et.size - imm because right shifts count down from the element size).  */
static void
do_neon_sri (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for insert"));
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
}
/* Encode VQSHLU (signed-to-unsigned saturating shift left, immediate).  */

static void
do_neon_qshlu_imm (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  constraint (imm < 0 || (unsigned)imm >= et.size,
	      _("immediate out of range for shift"));
  /* Only encodes the 'U present' variant of the instruction.
     In this case, signed types have OP (bit 8) set to 0.
     Unsigned types have OP set to 1.  */
  inst.instruction |= (et.type == NT_unsigned) << 8;
  /* The rest of the bits are the same as other immediate shifts.  */
  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
}
static void
do_neon_qmovn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
					    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  /* Saturating move where operands can be signed or unsigned, and the
     destination has the same signedness.  */
  NEON_ENCODE (INTEGER, inst);
  if (et.type == NT_unsigned)
    inst.instruction |= 0xc0;
  else
    inst.instruction |= 0x80;
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
					    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
/* Encode VMOVN (narrowing move).  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
/* Encode VSHLL (lengthening shift).  A shift by exactly the element size
   uses a dedicated "maximum shift" encoding; smaller shifts use the normal
   immediate-shift encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
/* Check the various types for the VCVT instruction, and return which version
   the current instruction is.  This X-macro table is expanded with different
   CVT_VAR definitions to build the flavour enum and the per-flavour opcode
   name arrays (bitshift / plain / round-to-zero variants).  */

#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
/* Determine which conversion flavour the current instruction's operand
   types match, by trying a type-check for each table entry in turn.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
/* Neon-syntax VFP conversions.  Looks up the legacy VFP mnemonic for the
   given flavour (bitshift or plain variant) and encodes through it.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
16096 do_vfp_nsyn_cvtz (void)
16098 enum neon_shape rs
= neon_select_shape (NS_FH
, NS_FF
, NS_FD
, NS_NULL
);
16099 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16100 const char *enc
[] =
16102 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
16108 if (flavour
< (int) ARRAY_SIZE (enc
) && enc
[flavour
])
16109 do_vfp_nsyn_opcode (enc
[flavour
]);
16113 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour
,
16114 enum neon_cvt_mode mode
)
16119 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
16120 D register operands. */
16121 if (flavour
== neon_cvt_flavour_s32_f64
16122 || flavour
== neon_cvt_flavour_u32_f64
)
16123 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16126 if (flavour
== neon_cvt_flavour_s32_f16
16127 || flavour
== neon_cvt_flavour_u32_f16
)
16128 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
),
16131 set_it_insn_type (OUTSIDE_IT_INSN
);
16135 case neon_cvt_flavour_s32_f64
:
16139 case neon_cvt_flavour_s32_f32
:
16143 case neon_cvt_flavour_s32_f16
:
16147 case neon_cvt_flavour_u32_f64
:
16151 case neon_cvt_flavour_u32_f32
:
16155 case neon_cvt_flavour_u32_f16
:
16160 first_error (_("invalid instruction shape"));
16166 case neon_cvt_mode_a
: rm
= 0; break;
16167 case neon_cvt_mode_n
: rm
= 1; break;
16168 case neon_cvt_mode_p
: rm
= 2; break;
16169 case neon_cvt_mode_m
: rm
= 3; break;
16170 default: first_error (_("invalid rounding mode")); return;
16173 NEON_ENCODE (FPV8
, inst
);
16174 encode_arm_vfp_reg (inst
.operands
[0].reg
, VFP_REG_Sd
);
16175 encode_arm_vfp_reg (inst
.operands
[1].reg
, sz
== 1 ? VFP_REG_Dm
: VFP_REG_Sm
);
16176 inst
.instruction
|= sz
<< 8;
16178 /* ARMv8.2 fp16 VCVT instruction. */
16179 if (flavour
== neon_cvt_flavour_s32_f16
16180 ||flavour
== neon_cvt_flavour_u32_f16
)
16181 do_scalar_fp16_v82_encode ();
16182 inst
.instruction
|= op
<< 7;
16183 inst
.instruction
|= rm
<< 16;
16184 inst
.instruction
|= 0xf0000000;
16185 inst
.is_neon
= TRUE
;
16189 do_neon_cvt_1 (enum neon_cvt_mode mode
)
16191 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_FFI
, NS_DD
, NS_QQ
,
16192 NS_FD
, NS_DF
, NS_FF
, NS_QD
, NS_DQ
,
16193 NS_FH
, NS_HF
, NS_FHI
, NS_HFI
,
16195 enum neon_cvt_flavour flavour
= get_neon_cvt_flavour (rs
);
16197 if (flavour
== neon_cvt_flavour_invalid
)
16200 /* PR11109: Handle round-to-zero for VCVT conversions. */
16201 if (mode
== neon_cvt_mode_z
16202 && ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_vfp_v2
)
16203 && (flavour
== neon_cvt_flavour_s16_f16
16204 || flavour
== neon_cvt_flavour_u16_f16
16205 || flavour
== neon_cvt_flavour_s32_f32
16206 || flavour
== neon_cvt_flavour_u32_f32
16207 || flavour
== neon_cvt_flavour_s32_f64
16208 || flavour
== neon_cvt_flavour_u32_f64
)
16209 && (rs
== NS_FD
|| rs
== NS_FF
))
16211 do_vfp_nsyn_cvtz ();
16215 /* ARMv8.2 fp16 VCVT conversions. */
16216 if (mode
== neon_cvt_mode_z
16217 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16
)
16218 && (flavour
== neon_cvt_flavour_s32_f16
16219 || flavour
== neon_cvt_flavour_u32_f16
)
16222 do_vfp_nsyn_cvtz ();
16223 do_scalar_fp16_v82_encode ();
16227 /* VFP rather than Neon conversions. */
16228 if (flavour
>= neon_cvt_flavour_first_fp
)
16230 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16231 do_vfp_nsyn_cvt (rs
, flavour
);
16233 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16244 unsigned enctab
[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
16245 0x0000100, 0x1000100, 0x0, 0x1000000};
16247 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16250 /* Fixed-point conversion with #0 immediate is encoded as an
16251 integer conversion. */
16252 if (inst
.operands
[2].present
&& inst
.operands
[2].imm
== 0)
16254 NEON_ENCODE (IMMED
, inst
);
16255 if (flavour
!= neon_cvt_flavour_invalid
)
16256 inst
.instruction
|= enctab
[flavour
];
16257 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16258 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16259 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16260 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16261 inst
.instruction
|= neon_quad (rs
) << 6;
16262 inst
.instruction
|= 1 << 21;
16263 if (flavour
< neon_cvt_flavour_s16_f16
)
16265 inst
.instruction
|= 1 << 21;
16266 immbits
= 32 - inst
.operands
[2].imm
;
16267 inst
.instruction
|= immbits
<< 16;
16271 inst
.instruction
|= 3 << 20;
16272 immbits
= 16 - inst
.operands
[2].imm
;
16273 inst
.instruction
|= immbits
<< 16;
16274 inst
.instruction
&= ~(1 << 9);
16277 neon_dp_fixup (&inst
);
16283 if (mode
!= neon_cvt_mode_x
&& mode
!= neon_cvt_mode_z
)
16285 NEON_ENCODE (FLOAT
, inst
);
16286 set_it_insn_type (OUTSIDE_IT_INSN
);
16288 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
16291 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16292 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16293 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16294 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16295 inst
.instruction
|= neon_quad (rs
) << 6;
16296 inst
.instruction
|= (flavour
== neon_cvt_flavour_u16_f16
16297 || flavour
== neon_cvt_flavour_u32_f32
) << 7;
16298 inst
.instruction
|= mode
<< 8;
16299 if (flavour
== neon_cvt_flavour_u16_f16
16300 || flavour
== neon_cvt_flavour_s16_f16
)
16301 /* Mask off the original size bits and reencode them. */
16302 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff) | (1 << 18));
16305 inst
.instruction
|= 0xfc000000;
16307 inst
.instruction
|= 0xf0000000;
16313 unsigned enctab
[] = { 0x100, 0x180, 0x0, 0x080,
16314 0x100, 0x180, 0x0, 0x080};
16316 NEON_ENCODE (INTEGER
, inst
);
16318 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16321 if (flavour
!= neon_cvt_flavour_invalid
)
16322 inst
.instruction
|= enctab
[flavour
];
16324 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16325 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16326 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16327 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16328 inst
.instruction
|= neon_quad (rs
) << 6;
16329 if (flavour
>= neon_cvt_flavour_s16_f16
16330 && flavour
<= neon_cvt_flavour_f16_u16
)
16331 /* Half precision. */
16332 inst
.instruction
|= 1 << 18;
16334 inst
.instruction
|= 2 << 18;
16336 neon_dp_fixup (&inst
);
16341 /* Half-precision conversions for Advanced SIMD -- neon. */
16344 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16348 && (inst
.vectype
.el
[0].size
!= 16 || inst
.vectype
.el
[1].size
!= 32))
16350 as_bad (_("operand size must match register width"));
16355 && ((inst
.vectype
.el
[0].size
!= 32 || inst
.vectype
.el
[1].size
!= 16)))
16357 as_bad (_("operand size must match register width"));
16362 inst
.instruction
= 0x3b60600;
16364 inst
.instruction
= 0x3b60700;
16366 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16367 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16368 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16369 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16370 neon_dp_fixup (&inst
);
16374 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
16375 if (mode
== neon_cvt_mode_x
|| mode
== neon_cvt_mode_z
)
16376 do_vfp_nsyn_cvt (rs
, flavour
);
16378 do_vfp_nsyn_cvt_fpv8 (flavour
, mode
);
16383 do_neon_cvtr (void)
16385 do_neon_cvt_1 (neon_cvt_mode_x
);
16391 do_neon_cvt_1 (neon_cvt_mode_z
);
16395 do_neon_cvta (void)
16397 do_neon_cvt_1 (neon_cvt_mode_a
);
16401 do_neon_cvtn (void)
16403 do_neon_cvt_1 (neon_cvt_mode_n
);
16407 do_neon_cvtp (void)
16409 do_neon_cvt_1 (neon_cvt_mode_p
);
16413 do_neon_cvtm (void)
16415 do_neon_cvt_1 (neon_cvt_mode_m
);
16419 do_neon_cvttb_2 (bfd_boolean t
, bfd_boolean to
, bfd_boolean is_double
)
16422 mark_feature_used (&fpu_vfp_ext_armv8
);
16424 encode_arm_vfp_reg (inst
.operands
[0].reg
,
16425 (is_double
&& !to
) ? VFP_REG_Dd
: VFP_REG_Sd
);
16426 encode_arm_vfp_reg (inst
.operands
[1].reg
,
16427 (is_double
&& to
) ? VFP_REG_Dm
: VFP_REG_Sm
);
16428 inst
.instruction
|= to
? 0x10000 : 0;
16429 inst
.instruction
|= t
? 0x80 : 0;
16430 inst
.instruction
|= is_double
? 0x100 : 0;
16431 do_vfp_cond_or_thumb ();
16435 do_neon_cvttb_1 (bfd_boolean t
)
16437 enum neon_shape rs
= neon_select_shape (NS_HF
, NS_HD
, NS_FH
, NS_FF
, NS_FD
,
16438 NS_DF
, NS_DH
, NS_NULL
);
16442 else if (neon_check_type (2, rs
, N_F16
, N_F32
| N_VFP
).type
!= NT_invtype
)
16445 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/FALSE
);
16447 else if (neon_check_type (2, rs
, N_F32
| N_VFP
, N_F16
).type
!= NT_invtype
)
16450 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/FALSE
);
16452 else if (neon_check_type (2, rs
, N_F16
, N_F64
| N_VFP
).type
!= NT_invtype
)
16454 /* The VCVTB and VCVTT instructions with D-register operands
16455 don't work for SP only targets. */
16456 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16460 do_neon_cvttb_2 (t
, /*to=*/TRUE
, /*is_double=*/TRUE
);
16462 else if (neon_check_type (2, rs
, N_F64
| N_VFP
, N_F16
).type
!= NT_invtype
)
16464 /* The VCVTB and VCVTT instructions with D-register operands
16465 don't work for SP only targets. */
16466 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
16470 do_neon_cvttb_2 (t
, /*to=*/FALSE
, /*is_double=*/TRUE
);
16477 do_neon_cvtb (void)
16479 do_neon_cvttb_1 (FALSE
);
16484 do_neon_cvtt (void)
16486 do_neon_cvttb_1 (TRUE
);
16490 neon_move_immediate (void)
16492 enum neon_shape rs
= neon_select_shape (NS_DI
, NS_QI
, NS_NULL
);
16493 struct neon_type_el et
= neon_check_type (2, rs
,
16494 N_I8
| N_I16
| N_I32
| N_I64
| N_F32
| N_KEY
, N_EQK
);
16495 unsigned immlo
, immhi
= 0, immbits
;
16496 int op
, cmode
, float_p
;
16498 constraint (et
.type
== NT_invtype
,
16499 _("operand size must be specified for immediate VMOV"));
16501 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16502 op
= (inst
.instruction
& (1 << 5)) != 0;
16504 immlo
= inst
.operands
[1].imm
;
16505 if (inst
.operands
[1].regisimm
)
16506 immhi
= inst
.operands
[1].reg
;
16508 constraint (et
.size
< 32 && (immlo
& ~((1 << et
.size
) - 1)) != 0,
16509 _("immediate has bits set outside the operand size"));
16511 float_p
= inst
.operands
[1].immisfloat
;
16513 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
, &op
,
16514 et
.size
, et
.type
)) == FAIL
)
16516 /* Invert relevant bits only. */
16517 neon_invert_size (&immlo
, &immhi
, et
.size
);
16518 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16519 with one or the other; those cases are caught by
16520 neon_cmode_for_move_imm. */
16522 if ((cmode
= neon_cmode_for_move_imm (immlo
, immhi
, float_p
, &immbits
,
16523 &op
, et
.size
, et
.type
)) == FAIL
)
16525 first_error (_("immediate out of range"));
16530 inst
.instruction
&= ~(1 << 5);
16531 inst
.instruction
|= op
<< 5;
16533 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16534 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16535 inst
.instruction
|= neon_quad (rs
) << 6;
16536 inst
.instruction
|= cmode
<< 8;
16538 neon_write_immbits (immbits
);
16544 if (inst
.operands
[1].isreg
)
16546 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16548 NEON_ENCODE (INTEGER
, inst
);
16549 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16550 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16551 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16552 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16553 inst
.instruction
|= neon_quad (rs
) << 6;
16557 NEON_ENCODE (IMMED
, inst
);
16558 neon_move_immediate ();
16561 neon_dp_fixup (&inst
);
16564 /* Encode instructions of form:
16566 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16567 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16570 neon_mixed_length (struct neon_type_el et
, unsigned size
)
16572 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16573 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16574 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16575 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16576 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16577 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16578 inst
.instruction
|= (et
.type
== NT_unsigned
) << 24;
16579 inst
.instruction
|= neon_logbits (size
) << 20;
16581 neon_dp_fixup (&inst
);
16585 do_neon_dyadic_long (void)
16587 /* FIXME: Type checking for lengthening op. */
16588 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16589 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16590 neon_mixed_length (et
, et
.size
);
16594 do_neon_abal (void)
16596 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16597 N_EQK
| N_INT
| N_DBL
, N_EQK
, N_SU_32
| N_KEY
);
16598 neon_mixed_length (et
, et
.size
);
16602 neon_mac_reg_scalar_long (unsigned regtypes
, unsigned scalartypes
)
16604 if (inst
.operands
[2].isscalar
)
16606 struct neon_type_el et
= neon_check_type (3, NS_QDS
,
16607 N_EQK
| N_DBL
, N_EQK
, regtypes
| N_KEY
);
16608 NEON_ENCODE (SCALAR
, inst
);
16609 neon_mul_mac (et
, et
.type
== NT_unsigned
);
16613 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16614 N_EQK
| N_DBL
, N_EQK
, scalartypes
| N_KEY
);
16615 NEON_ENCODE (INTEGER
, inst
);
16616 neon_mixed_length (et
, et
.size
);
16621 do_neon_mac_maybe_scalar_long (void)
16623 neon_mac_reg_scalar_long (N_S16
| N_S32
| N_U16
| N_U32
, N_SU_32
);
16626 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16627 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16630 neon_scalar_for_fmac_fp16_long (unsigned scalar
, unsigned quad_p
)
16632 unsigned regno
= NEON_SCALAR_REG (scalar
);
16633 unsigned elno
= NEON_SCALAR_INDEX (scalar
);
16637 if (regno
> 7 || elno
> 3)
16640 return ((regno
& 0x7)
16641 | ((elno
& 0x1) << 3)
16642 | (((elno
>> 1) & 0x1) << 5));
16646 if (regno
> 15 || elno
> 1)
16649 return (((regno
& 0x1) << 5)
16650 | ((regno
>> 1) & 0x7)
16651 | ((elno
& 0x1) << 3));
16655 first_error (_("scalar out of range for multiply instruction"));
16660 do_neon_fmac_maybe_scalar_long (int subtype
)
16662 enum neon_shape rs
;
16664 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
16665 field (bits[21:20]) has different meaning. For scalar index variant, it's
16666 used to differentiate add and subtract, otherwise it's with fixed value
16670 if (inst
.cond
!= COND_ALWAYS
)
16671 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
16672 "behaviour is UNPREDICTABLE"));
16674 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_fp16_fml
),
16677 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
16680 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
16681 be a scalar index register. */
16682 if (inst
.operands
[2].isscalar
)
16684 high8
= 0xfe000000;
16687 rs
= neon_select_shape (NS_DHS
, NS_QDS
, NS_NULL
);
16691 high8
= 0xfc000000;
16694 inst
.instruction
|= (0x1 << 23);
16695 rs
= neon_select_shape (NS_DHH
, NS_QDD
, NS_NULL
);
16698 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_F16
);
16700 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
16701 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
16702 so we simply pass -1 as size. */
16703 unsigned quad_p
= (rs
== NS_QDD
|| rs
== NS_QDS
);
16704 neon_three_same (quad_p
, 0, size
);
16706 /* Undo neon_dp_fixup. Redo the high eight bits. */
16707 inst
.instruction
&= 0x00ffffff;
16708 inst
.instruction
|= high8
;
16710 #define LOW1(R) ((R) & 0x1)
16711 #define HI4(R) (((R) >> 1) & 0xf)
16712 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
16713 whether the instruction is in Q form and whether Vm is a scalar indexed
16715 if (inst
.operands
[2].isscalar
)
16718 = neon_scalar_for_fmac_fp16_long (inst
.operands
[2].reg
, quad_p
);
16719 inst
.instruction
&= 0xffffffd0;
16720 inst
.instruction
|= rm
;
16724 /* Redo Rn as well. */
16725 inst
.instruction
&= 0xfff0ff7f;
16726 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16727 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16732 /* Redo Rn and Rm. */
16733 inst
.instruction
&= 0xfff0ff50;
16734 inst
.instruction
|= HI4 (inst
.operands
[1].reg
) << 16;
16735 inst
.instruction
|= LOW1 (inst
.operands
[1].reg
) << 7;
16736 inst
.instruction
|= HI4 (inst
.operands
[2].reg
);
16737 inst
.instruction
|= LOW1 (inst
.operands
[2].reg
) << 5;
16742 do_neon_vfmal (void)
16744 return do_neon_fmac_maybe_scalar_long (0);
16748 do_neon_vfmsl (void)
16750 return do_neon_fmac_maybe_scalar_long (1);
16754 do_neon_dyadic_wide (void)
16756 struct neon_type_el et
= neon_check_type (3, NS_QQD
,
16757 N_EQK
| N_DBL
, N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
16758 neon_mixed_length (et
, et
.size
);
16762 do_neon_dyadic_narrow (void)
16764 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16765 N_EQK
| N_DBL
, N_EQK
, N_I16
| N_I32
| N_I64
| N_KEY
);
16766 /* Operand sign is unimportant, and the U bit is part of the opcode,
16767 so force the operand type to integer. */
16768 et
.type
= NT_integer
;
16769 neon_mixed_length (et
, et
.size
/ 2);
16773 do_neon_mul_sat_scalar_long (void)
16775 neon_mac_reg_scalar_long (N_S16
| N_S32
, N_S16
| N_S32
);
16779 do_neon_vmull (void)
16781 if (inst
.operands
[2].isscalar
)
16782 do_neon_mac_maybe_scalar_long ();
16785 struct neon_type_el et
= neon_check_type (3, NS_QDD
,
16786 N_EQK
| N_DBL
, N_EQK
, N_SU_32
| N_P8
| N_P64
| N_KEY
);
16788 if (et
.type
== NT_poly
)
16789 NEON_ENCODE (POLY
, inst
);
16791 NEON_ENCODE (INTEGER
, inst
);
16793 /* For polynomial encoding the U bit must be zero, and the size must
16794 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16795 obviously, as 0b10). */
16798 /* Check we're on the correct architecture. */
16799 if (!mark_feature_used (&fpu_crypto_ext_armv8
))
16801 _("Instruction form not available on this architecture.");
16806 neon_mixed_length (et
, et
.size
);
16813 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
16814 struct neon_type_el et
= neon_check_type (3, rs
,
16815 N_EQK
, N_EQK
, N_8
| N_16
| N_32
| N_64
| N_KEY
);
16816 unsigned imm
= (inst
.operands
[3].imm
* et
.size
) / 8;
16818 constraint (imm
>= (unsigned) (neon_quad (rs
) ? 16 : 8),
16819 _("shift out of range"));
16820 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16821 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16822 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16823 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16824 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
16825 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
16826 inst
.instruction
|= neon_quad (rs
) << 6;
16827 inst
.instruction
|= imm
<< 8;
16829 neon_dp_fixup (&inst
);
16835 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
16836 struct neon_type_el et
= neon_check_type (2, rs
,
16837 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16838 unsigned op
= (inst
.instruction
>> 7) & 3;
16839 /* N (width of reversed regions) is encoded as part of the bitmask. We
16840 extract it here to check the elements to be reversed are smaller.
16841 Otherwise we'd get a reserved instruction. */
16842 unsigned elsize
= (op
== 2) ? 16 : (op
== 1) ? 32 : (op
== 0) ? 64 : 0;
16843 gas_assert (elsize
!= 0);
16844 constraint (et
.size
>= elsize
,
16845 _("elements must be smaller than reversal region"));
16846 neon_two_same (neon_quad (rs
), 1, et
.size
);
16852 if (inst
.operands
[1].isscalar
)
16854 enum neon_shape rs
= neon_select_shape (NS_DS
, NS_QS
, NS_NULL
);
16855 struct neon_type_el et
= neon_check_type (2, rs
,
16856 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
16857 unsigned sizebits
= et
.size
>> 3;
16858 unsigned dm
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
16859 int logsize
= neon_logbits (et
.size
);
16860 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
) << logsize
;
16862 if (vfp_or_neon_is_neon (NEON_CHECK_CC
) == FAIL
)
16865 NEON_ENCODE (SCALAR
, inst
);
16866 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16867 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16868 inst
.instruction
|= LOW4 (dm
);
16869 inst
.instruction
|= HI1 (dm
) << 5;
16870 inst
.instruction
|= neon_quad (rs
) << 6;
16871 inst
.instruction
|= x
<< 17;
16872 inst
.instruction
|= sizebits
<< 16;
16874 neon_dp_fixup (&inst
);
16878 enum neon_shape rs
= neon_select_shape (NS_DR
, NS_QR
, NS_NULL
);
16879 struct neon_type_el et
= neon_check_type (2, rs
,
16880 N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
16881 /* Duplicate ARM register to lanes of vector. */
16882 NEON_ENCODE (ARMREG
, inst
);
16885 case 8: inst
.instruction
|= 0x400000; break;
16886 case 16: inst
.instruction
|= 0x000020; break;
16887 case 32: inst
.instruction
|= 0x000000; break;
16890 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
16891 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 16;
16892 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 7;
16893 inst
.instruction
|= neon_quad (rs
) << 21;
16894 /* The encoding for this instruction is identical for the ARM and Thumb
16895 variants, except for the condition field. */
16896 do_vfp_cond_or_thumb ();
16900 /* VMOV has particularly many variations. It can be one of:
16901 0. VMOV<c><q> <Qd>, <Qm>
16902 1. VMOV<c><q> <Dd>, <Dm>
16903 (Register operations, which are VORR with Rm = Rn.)
16904 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16905 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16907 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16908 (ARM register to scalar.)
16909 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16910 (Two ARM registers to vector.)
16911 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16912 (Scalar to ARM register.)
16913 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16914 (Vector to two ARM registers.)
16915 8. VMOV.F32 <Sd>, <Sm>
16916 9. VMOV.F64 <Dd>, <Dm>
16917 (VFP register moves.)
16918 10. VMOV.F32 <Sd>, #imm
16919 11. VMOV.F64 <Dd>, #imm
16920 (VFP float immediate load.)
16921 12. VMOV <Rd>, <Sm>
16922 (VFP single to ARM reg.)
16923 13. VMOV <Sd>, <Rm>
16924 (ARM reg to VFP single.)
16925 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16926 (Two ARM regs to two VFP singles.)
16927 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16928 (Two VFP singles to two ARM regs.)
16930 These cases can be disambiguated using neon_select_shape, except cases 1/9
16931 and 3/11 which depend on the operand type too.
16933 All the encoded bits are hardcoded by this function.
16935 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16936 Cases 5, 7 may be used with VFPv2 and above.
16938 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16939 can specify a type where it doesn't make sense to, and is ignored). */
16944 enum neon_shape rs
= neon_select_shape (NS_RRFF
, NS_FFRR
, NS_DRR
, NS_RRD
,
16945 NS_QQ
, NS_DD
, NS_QI
, NS_DI
, NS_SR
,
16946 NS_RS
, NS_FF
, NS_FI
, NS_RF
, NS_FR
,
16947 NS_HR
, NS_RH
, NS_HI
, NS_NULL
);
16948 struct neon_type_el et
;
16949 const char *ldconst
= 0;
16953 case NS_DD
: /* case 1/9. */
16954 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16955 /* It is not an error here if no type is given. */
16957 if (et
.type
== NT_float
&& et
.size
== 64)
16959 do_vfp_nsyn_opcode ("fcpyd");
16962 /* fall through. */
16964 case NS_QQ
: /* case 0/1. */
16966 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16968 /* The architecture manual I have doesn't explicitly state which
16969 value the U bit should have for register->register moves, but
16970 the equivalent VORR instruction has U = 0, so do that. */
16971 inst
.instruction
= 0x0200110;
16972 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
16973 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
16974 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
16975 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
16976 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
16977 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
16978 inst
.instruction
|= neon_quad (rs
) << 6;
16980 neon_dp_fixup (&inst
);
16984 case NS_DI
: /* case 3/11. */
16985 et
= neon_check_type (2, rs
, N_EQK
, N_F64
| N_KEY
);
16987 if (et
.type
== NT_float
&& et
.size
== 64)
16989 /* case 11 (fconstd). */
16990 ldconst
= "fconstd";
16991 goto encode_fconstd
;
16993 /* fall through. */
16995 case NS_QI
: /* case 2/3. */
16996 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH
) == FAIL
)
16998 inst
.instruction
= 0x0800010;
16999 neon_move_immediate ();
17000 neon_dp_fixup (&inst
);
17003 case NS_SR
: /* case 4. */
17005 unsigned bcdebits
= 0;
17007 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[0].reg
);
17008 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[0].reg
);
17010 /* .<size> is optional here, defaulting to .32. */
17011 if (inst
.vectype
.elems
== 0
17012 && inst
.operands
[0].vectype
.type
== NT_invtype
17013 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17015 inst
.vectype
.el
[0].type
= NT_untyped
;
17016 inst
.vectype
.el
[0].size
= 32;
17017 inst
.vectype
.elems
= 1;
17020 et
= neon_check_type (2, NS_NULL
, N_8
| N_16
| N_32
| N_KEY
, N_EQK
);
17021 logsize
= neon_logbits (et
.size
);
17023 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17025 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17026 && et
.size
!= 32, _(BAD_FPU
));
17027 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17028 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17032 case 8: bcdebits
= 0x8; break;
17033 case 16: bcdebits
= 0x1; break;
17034 case 32: bcdebits
= 0x0; break;
17038 bcdebits
|= x
<< logsize
;
17040 inst
.instruction
= 0xe000b10;
17041 do_vfp_cond_or_thumb ();
17042 inst
.instruction
|= LOW4 (dn
) << 16;
17043 inst
.instruction
|= HI1 (dn
) << 7;
17044 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17045 inst
.instruction
|= (bcdebits
& 3) << 5;
17046 inst
.instruction
|= (bcdebits
>> 2) << 21;
17050 case NS_DRR
: /* case 5 (fmdrr). */
17051 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17054 inst
.instruction
= 0xc400b10;
17055 do_vfp_cond_or_thumb ();
17056 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
);
17057 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 5;
17058 inst
.instruction
|= inst
.operands
[1].reg
<< 12;
17059 inst
.instruction
|= inst
.operands
[2].reg
<< 16;
17062 case NS_RS
: /* case 6. */
17065 unsigned dn
= NEON_SCALAR_REG (inst
.operands
[1].reg
);
17066 unsigned x
= NEON_SCALAR_INDEX (inst
.operands
[1].reg
);
17067 unsigned abcdebits
= 0;
17069 /* .<dt> is optional here, defaulting to .32. */
17070 if (inst
.vectype
.elems
== 0
17071 && inst
.operands
[0].vectype
.type
== NT_invtype
17072 && inst
.operands
[1].vectype
.type
== NT_invtype
)
17074 inst
.vectype
.el
[0].type
= NT_untyped
;
17075 inst
.vectype
.el
[0].size
= 32;
17076 inst
.vectype
.elems
= 1;
17079 et
= neon_check_type (2, NS_NULL
,
17080 N_EQK
, N_S8
| N_S16
| N_U8
| N_U16
| N_32
| N_KEY
);
17081 logsize
= neon_logbits (et
.size
);
17083 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v1
),
17085 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_v1
)
17086 && et
.size
!= 32, _(BAD_FPU
));
17087 constraint (et
.type
== NT_invtype
, _("bad type for scalar"));
17088 constraint (x
>= 64 / et
.size
, _("scalar index out of range"));
17092 case 8: abcdebits
= (et
.type
== NT_signed
) ? 0x08 : 0x18; break;
17093 case 16: abcdebits
= (et
.type
== NT_signed
) ? 0x01 : 0x11; break;
17094 case 32: abcdebits
= 0x00; break;
17098 abcdebits
|= x
<< logsize
;
17099 inst
.instruction
= 0xe100b10;
17100 do_vfp_cond_or_thumb ();
17101 inst
.instruction
|= LOW4 (dn
) << 16;
17102 inst
.instruction
|= HI1 (dn
) << 7;
17103 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17104 inst
.instruction
|= (abcdebits
& 3) << 5;
17105 inst
.instruction
|= (abcdebits
>> 2) << 21;
17109 case NS_RRD
: /* case 7 (fmrrd). */
17110 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_v2
),
17113 inst
.instruction
= 0xc500b10;
17114 do_vfp_cond_or_thumb ();
17115 inst
.instruction
|= inst
.operands
[0].reg
<< 12;
17116 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17117 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17118 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17121 case NS_FF
: /* case 8 (fcpys). */
17122 do_vfp_nsyn_opcode ("fcpys");
17126 case NS_FI
: /* case 10 (fconsts). */
17127 ldconst
= "fconsts";
17129 if (!inst
.operands
[1].immisfloat
)
17132 /* Immediate has to fit in 8 bits so float is enough. */
17133 float imm
= (float) inst
.operands
[1].imm
;
17134 memcpy (&new_imm
, &imm
, sizeof (float));
17135 /* But the assembly may have been written to provide an integer
17136 bit pattern that equates to a float, so check that the
17137 conversion has worked. */
17138 if (is_quarter_float (new_imm
))
17140 if (is_quarter_float (inst
.operands
[1].imm
))
17141 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
17143 inst
.operands
[1].imm
= new_imm
;
17144 inst
.operands
[1].immisfloat
= 1;
17148 if (is_quarter_float (inst
.operands
[1].imm
))
17150 inst
.operands
[1].imm
= neon_qfloat_bits (inst
.operands
[1].imm
);
17151 do_vfp_nsyn_opcode (ldconst
);
17153 /* ARMv8.2 fp16 vmov.f16 instruction. */
17155 do_scalar_fp16_v82_encode ();
17158 first_error (_("immediate out of range"));
17162 case NS_RF
: /* case 12 (fmrs). */
17163 do_vfp_nsyn_opcode ("fmrs");
17164 /* ARMv8.2 fp16 vmov.f16 instruction. */
17166 do_scalar_fp16_v82_encode ();
17170 case NS_FR
: /* case 13 (fmsr). */
17171 do_vfp_nsyn_opcode ("fmsr");
17172 /* ARMv8.2 fp16 vmov.f16 instruction. */
17174 do_scalar_fp16_v82_encode ();
17177 /* The encoders for the fmrrs and fmsrr instructions expect three operands
17178 (one of which is a list), but we have parsed four. Do some fiddling to
17179 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
17181 case NS_RRFF
: /* case 14 (fmrrs). */
17182 constraint (inst
.operands
[3].reg
!= inst
.operands
[2].reg
+ 1,
17183 _("VFP registers must be adjacent"));
17184 inst
.operands
[2].imm
= 2;
17185 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17186 do_vfp_nsyn_opcode ("fmrrs");
17189 case NS_FFRR
: /* case 15 (fmsrr). */
17190 constraint (inst
.operands
[1].reg
!= inst
.operands
[0].reg
+ 1,
17191 _("VFP registers must be adjacent"));
17192 inst
.operands
[1] = inst
.operands
[2];
17193 inst
.operands
[2] = inst
.operands
[3];
17194 inst
.operands
[0].imm
= 2;
17195 memset (&inst
.operands
[3], '\0', sizeof (inst
.operands
[3]));
17196 do_vfp_nsyn_opcode ("fmsrr");
17200 /* neon_select_shape has determined that the instruction
17201 shape is wrong and has already set the error message. */
17210 do_neon_rshift_round_imm (void)
17212 enum neon_shape rs
= neon_select_shape (NS_DDI
, NS_QQI
, NS_NULL
);
17213 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_ALL
| N_KEY
);
17214 int imm
= inst
.operands
[2].imm
;
17216 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
17219 inst
.operands
[2].present
= 0;
17224 constraint (imm
< 1 || (unsigned)imm
> et
.size
,
17225 _("immediate out of range for shift"));
17226 neon_imm_shift (TRUE
, et
.type
== NT_unsigned
, neon_quad (rs
), et
,
17231 do_neon_movhf (void)
17233 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_NULL
);
17234 constraint (rs
!= NS_HH
, _("invalid suffix"));
17236 if (inst
.cond
!= COND_ALWAYS
)
17240 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
17241 " the behaviour is UNPREDICTABLE"));
17245 inst
.error
= BAD_COND
;
17250 do_vfp_sp_monadic ();
17253 inst
.instruction
|= 0xf0000000;
17257 do_neon_movl (void)
17259 struct neon_type_el et
= neon_check_type (2, NS_QD
,
17260 N_EQK
| N_DBL
, N_SU_32
| N_KEY
);
17261 unsigned sizebits
= et
.size
>> 3;
17262 inst
.instruction
|= sizebits
<< 19;
17263 neon_two_same (0, et
.type
== NT_unsigned
, -1);
17269 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17270 struct neon_type_el et
= neon_check_type (2, rs
,
17271 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17272 NEON_ENCODE (INTEGER
, inst
);
17273 neon_two_same (neon_quad (rs
), 1, et
.size
);
17277 do_neon_zip_uzp (void)
17279 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17280 struct neon_type_el et
= neon_check_type (2, rs
,
17281 N_EQK
, N_8
| N_16
| N_32
| N_KEY
);
17282 if (rs
== NS_DD
&& et
.size
== 32)
17284 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
17285 inst
.instruction
= N_MNEM_vtrn
;
17289 neon_two_same (neon_quad (rs
), 1, et
.size
);
17293 do_neon_sat_abs_neg (void)
17295 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17296 struct neon_type_el et
= neon_check_type (2, rs
,
17297 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17298 neon_two_same (neon_quad (rs
), 1, et
.size
);
17302 do_neon_pair_long (void)
17304 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17305 struct neon_type_el et
= neon_check_type (2, rs
, N_EQK
, N_SU_32
| N_KEY
);
17306 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
17307 inst
.instruction
|= (et
.type
== NT_unsigned
) << 7;
17308 neon_two_same (neon_quad (rs
), 1, et
.size
);
17312 do_neon_recip_est (void)
17314 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17315 struct neon_type_el et
= neon_check_type (2, rs
,
17316 N_EQK
| N_FLT
, N_F_16_32
| N_U32
| N_KEY
);
17317 inst
.instruction
|= (et
.type
== NT_float
) << 8;
17318 neon_two_same (neon_quad (rs
), 1, et
.size
);
17324 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17325 struct neon_type_el et
= neon_check_type (2, rs
,
17326 N_EQK
, N_S8
| N_S16
| N_S32
| N_KEY
);
17327 neon_two_same (neon_quad (rs
), 1, et
.size
);
17333 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17334 struct neon_type_el et
= neon_check_type (2, rs
,
17335 N_EQK
, N_I8
| N_I16
| N_I32
| N_KEY
);
17336 neon_two_same (neon_quad (rs
), 1, et
.size
);
17342 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17343 struct neon_type_el et
= neon_check_type (2, rs
,
17344 N_EQK
| N_INT
, N_8
| N_KEY
);
17345 neon_two_same (neon_quad (rs
), 1, et
.size
);
17351 enum neon_shape rs
= neon_select_shape (NS_DD
, NS_QQ
, NS_NULL
);
17352 neon_two_same (neon_quad (rs
), 1, -1);
17356 do_neon_tbl_tbx (void)
17358 unsigned listlenbits
;
17359 neon_check_type (3, NS_DLD
, N_EQK
, N_EQK
, N_8
| N_KEY
);
17361 if (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 4)
17363 first_error (_("bad list length for table lookup"));
17367 listlenbits
= inst
.operands
[1].imm
- 1;
17368 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17369 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17370 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
17371 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
17372 inst
.instruction
|= LOW4 (inst
.operands
[2].reg
);
17373 inst
.instruction
|= HI1 (inst
.operands
[2].reg
) << 5;
17374 inst
.instruction
|= listlenbits
<< 8;
17376 neon_dp_fixup (&inst
);
17380 do_neon_ldm_stm (void)
17382 /* P, U and L bits are part of bitmask. */
17383 int is_dbmode
= (inst
.instruction
& (1 << 24)) != 0;
17384 unsigned offsetbits
= inst
.operands
[1].imm
* 2;
17386 if (inst
.operands
[1].issingle
)
17388 do_vfp_nsyn_ldm_stm (is_dbmode
);
17392 constraint (is_dbmode
&& !inst
.operands
[0].writeback
,
17393 _("writeback (!) must be used for VLDMDB and VSTMDB"));
17395 constraint (inst
.operands
[1].imm
< 1 || inst
.operands
[1].imm
> 16,
17396 _("register list must contain at least 1 and at most 16 "
17399 inst
.instruction
|= inst
.operands
[0].reg
<< 16;
17400 inst
.instruction
|= inst
.operands
[0].writeback
<< 21;
17401 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 12;
17402 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 22;
17404 inst
.instruction
|= offsetbits
;
17406 do_vfp_cond_or_thumb ();
17410 do_neon_ldr_str (void)
17412 int is_ldr
= (inst
.instruction
& (1 << 20)) != 0;
17414 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
17415 And is UNPREDICTABLE in thumb mode. */
17417 && inst
.operands
[1].reg
== REG_PC
17418 && (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v7
) || thumb_mode
))
17421 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17422 else if (warn_on_deprecated
)
17423 as_tsktsk (_("Use of PC here is deprecated"));
17426 if (inst
.operands
[0].issingle
)
17429 do_vfp_nsyn_opcode ("flds");
17431 do_vfp_nsyn_opcode ("fsts");
17433 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17434 if (inst
.vectype
.el
[0].size
== 16)
17435 do_scalar_fp16_v82_encode ();
17440 do_vfp_nsyn_opcode ("fldd");
17442 do_vfp_nsyn_opcode ("fstd");
17447 do_t_vldr_vstr_sysreg (void)
17449 int fp_vldr_bitno
= 20, sysreg_vldr_bitno
= 20;
17450 bfd_boolean is_vldr
= ((inst
.instruction
& (1 << fp_vldr_bitno
)) != 0);
17452 /* Use of PC is UNPREDICTABLE. */
17453 if (inst
.operands
[1].reg
== REG_PC
)
17454 inst
.error
= _("Use of PC here is UNPREDICTABLE");
17456 if (inst
.operands
[1].immisreg
)
17457 inst
.error
= _("instruction does not accept register index");
17459 if (!inst
.operands
[1].isreg
)
17460 inst
.error
= _("instruction does not accept PC-relative addressing");
17462 if (abs (inst
.operands
[1].imm
) >= (1 << 7))
17463 inst
.error
= _("immediate value out of range");
17465 inst
.instruction
= 0xec000f80;
17467 inst
.instruction
|= 1 << sysreg_vldr_bitno
;
17468 encode_arm_cp_address (1, TRUE
, FALSE
, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
);
17469 inst
.instruction
|= (inst
.operands
[0].imm
& 0x7) << 13;
17470 inst
.instruction
|= (inst
.operands
[0].imm
& 0x8) << 19;
17474 do_vldr_vstr (void)
17476 bfd_boolean sysreg_op
= !inst
.operands
[0].isreg
;
17478 /* VLDR/VSTR (System Register). */
17481 if (!mark_feature_used (&arm_ext_v8_1m_main
))
17482 as_bad (_("Instruction not permitted on this architecture"));
17484 do_t_vldr_vstr_sysreg ();
17489 if (!mark_feature_used (&fpu_vfp_ext_v1xd
))
17490 as_bad (_("Instruction not permitted on this architecture"));
17491 do_neon_ldr_str ();
17495 /* "interleave" version also handles non-interleaving register VLD1/VST1
17499 do_neon_ld_st_interleave (void)
17501 struct neon_type_el et
= neon_check_type (1, NS_NULL
,
17502 N_8
| N_16
| N_32
| N_64
);
17503 unsigned alignbits
= 0;
17505 /* The bits in this table go:
17506 0: register stride of one (0) or two (1)
17507 1,2: register list length, minus one (1, 2, 3, 4).
17508 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17509 We use -1 for invalid entries. */
17510 const int typetable
[] =
17512 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17513 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17514 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17515 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17519 if (et
.type
== NT_invtype
)
17522 if (inst
.operands
[1].immisalign
)
17523 switch (inst
.operands
[1].imm
>> 8)
17525 case 64: alignbits
= 1; break;
17527 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2
17528 && NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17529 goto bad_alignment
;
17533 if (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4)
17534 goto bad_alignment
;
17539 first_error (_("bad alignment"));
17543 inst
.instruction
|= alignbits
<< 4;
17544 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17546 /* Bits [4:6] of the immediate in a list specifier encode register stride
17547 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
17548 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
17549 up the right value for "type" in a table based on this value and the given
17550 list style, then stick it back. */
17551 idx
= ((inst
.operands
[0].imm
>> 4) & 7)
17552 | (((inst
.instruction
>> 8) & 3) << 3);
17554 typebits
= typetable
[idx
];
17556 constraint (typebits
== -1, _("bad list type for instruction"));
17557 constraint (((inst
.instruction
>> 8) & 3) && et
.size
== 64,
17558 _("bad element type for instruction"));
17560 inst
.instruction
&= ~0xf00;
17561 inst
.instruction
|= typebits
<< 8;
17564 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17565 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17566 otherwise. The variable arguments are a list of pairs of legal (size, align)
17567 values, terminated with -1. */
17570 neon_alignment_bit (int size
, int align
, int *do_alignment
, ...)
17573 int result
= FAIL
, thissize
, thisalign
;
17575 if (!inst
.operands
[1].immisalign
)
17581 va_start (ap
, do_alignment
);
17585 thissize
= va_arg (ap
, int);
17586 if (thissize
== -1)
17588 thisalign
= va_arg (ap
, int);
17590 if (size
== thissize
&& align
== thisalign
)
17593 while (result
!= SUCCESS
);
17597 if (result
== SUCCESS
)
17600 first_error (_("unsupported alignment for instruction"));
17606 do_neon_ld_st_lane (void)
17608 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17609 int align_good
, do_alignment
= 0;
17610 int logsize
= neon_logbits (et
.size
);
17611 int align
= inst
.operands
[1].imm
>> 8;
17612 int n
= (inst
.instruction
>> 8) & 3;
17613 int max_el
= 64 / et
.size
;
17615 if (et
.type
== NT_invtype
)
17618 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != n
+ 1,
17619 _("bad list length"));
17620 constraint (NEON_LANE (inst
.operands
[0].imm
) >= max_el
,
17621 _("scalar index out of range"));
17622 constraint (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2
17624 _("stride of 2 unavailable when element size is 8"));
17628 case 0: /* VLD1 / VST1. */
17629 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 16, 16,
17631 if (align_good
== FAIL
)
17635 unsigned alignbits
= 0;
17638 case 16: alignbits
= 0x1; break;
17639 case 32: alignbits
= 0x3; break;
17642 inst
.instruction
|= alignbits
<< 4;
17646 case 1: /* VLD2 / VST2. */
17647 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 16,
17648 16, 32, 32, 64, -1);
17649 if (align_good
== FAIL
)
17652 inst
.instruction
|= 1 << 4;
17655 case 2: /* VLD3 / VST3. */
17656 constraint (inst
.operands
[1].immisalign
,
17657 _("can't use alignment with this instruction"));
17660 case 3: /* VLD4 / VST4. */
17661 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17662 16, 64, 32, 64, 32, 128, -1);
17663 if (align_good
== FAIL
)
17667 unsigned alignbits
= 0;
17670 case 8: alignbits
= 0x1; break;
17671 case 16: alignbits
= 0x1; break;
17672 case 32: alignbits
= (align
== 64) ? 0x1 : 0x2; break;
17675 inst
.instruction
|= alignbits
<< 4;
17682 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
17683 if (n
!= 0 && NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17684 inst
.instruction
|= 1 << (4 + logsize
);
17686 inst
.instruction
|= NEON_LANE (inst
.operands
[0].imm
) << (logsize
+ 5);
17687 inst
.instruction
|= logsize
<< 10;
17690 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17693 do_neon_ld_dup (void)
17695 struct neon_type_el et
= neon_check_type (1, NS_NULL
, N_8
| N_16
| N_32
);
17696 int align_good
, do_alignment
= 0;
17698 if (et
.type
== NT_invtype
)
17701 switch ((inst
.instruction
>> 8) & 3)
17703 case 0: /* VLD1. */
17704 gas_assert (NEON_REG_STRIDE (inst
.operands
[0].imm
) != 2);
17705 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17706 &do_alignment
, 16, 16, 32, 32, -1);
17707 if (align_good
== FAIL
)
17709 switch (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
))
17712 case 2: inst
.instruction
|= 1 << 5; break;
17713 default: first_error (_("bad list length")); return;
17715 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17718 case 1: /* VLD2. */
17719 align_good
= neon_alignment_bit (et
.size
, inst
.operands
[1].imm
>> 8,
17720 &do_alignment
, 8, 16, 16, 32, 32, 64,
17722 if (align_good
== FAIL
)
17724 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 2,
17725 _("bad list length"));
17726 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17727 inst
.instruction
|= 1 << 5;
17728 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17731 case 2: /* VLD3. */
17732 constraint (inst
.operands
[1].immisalign
,
17733 _("can't use alignment with this instruction"));
17734 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 3,
17735 _("bad list length"));
17736 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17737 inst
.instruction
|= 1 << 5;
17738 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17741 case 3: /* VLD4. */
17743 int align
= inst
.operands
[1].imm
>> 8;
17744 align_good
= neon_alignment_bit (et
.size
, align
, &do_alignment
, 8, 32,
17745 16, 64, 32, 64, 32, 128, -1);
17746 if (align_good
== FAIL
)
17748 constraint (NEON_REGLIST_LENGTH (inst
.operands
[0].imm
) != 4,
17749 _("bad list length"));
17750 if (NEON_REG_STRIDE (inst
.operands
[0].imm
) == 2)
17751 inst
.instruction
|= 1 << 5;
17752 if (et
.size
== 32 && align
== 128)
17753 inst
.instruction
|= 0x3 << 6;
17755 inst
.instruction
|= neon_logbits (et
.size
) << 6;
17762 inst
.instruction
|= do_alignment
<< 4;
17765 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17766 apart from bits [11:4]. */
17769 do_neon_ldx_stx (void)
17771 if (inst
.operands
[1].isreg
)
17772 constraint (inst
.operands
[1].reg
== REG_PC
, BAD_PC
);
17774 switch (NEON_LANE (inst
.operands
[0].imm
))
17776 case NEON_INTERLEAVE_LANES
:
17777 NEON_ENCODE (INTERLV
, inst
);
17778 do_neon_ld_st_interleave ();
17781 case NEON_ALL_LANES
:
17782 NEON_ENCODE (DUP
, inst
);
17783 if (inst
.instruction
== N_INV
)
17785 first_error ("only loads support such operands");
17792 NEON_ENCODE (LANE
, inst
);
17793 do_neon_ld_st_lane ();
17796 /* L bit comes from bit mask. */
17797 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17798 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17799 inst
.instruction
|= inst
.operands
[1].reg
<< 16;
17801 if (inst
.operands
[1].postind
)
17803 int postreg
= inst
.operands
[1].imm
& 0xf;
17804 constraint (!inst
.operands
[1].immisreg
,
17805 _("post-index must be a register"));
17806 constraint (postreg
== 0xd || postreg
== 0xf,
17807 _("bad register for post-index"));
17808 inst
.instruction
|= postreg
;
17812 constraint (inst
.operands
[1].immisreg
, BAD_ADDR_MODE
);
17813 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
17814 || inst
.relocs
[0].exp
.X_add_number
!= 0,
17817 if (inst
.operands
[1].writeback
)
17819 inst
.instruction
|= 0xd;
17822 inst
.instruction
|= 0xf;
17826 inst
.instruction
|= 0xf9000000;
17828 inst
.instruction
|= 0xf4000000;
17833 do_vfp_nsyn_fpv8 (enum neon_shape rs
)
17835 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17836 D register operands. */
17837 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17838 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17841 NEON_ENCODE (FPV8
, inst
);
17843 if (rs
== NS_FFF
|| rs
== NS_HHH
)
17845 do_vfp_sp_dyadic ();
17847 /* ARMv8.2 fp16 instruction. */
17849 do_scalar_fp16_v82_encode ();
17852 do_vfp_dp_rd_rn_rm ();
17855 inst
.instruction
|= 0x100;
17857 inst
.instruction
|= 0xf0000000;
17863 set_it_insn_type (OUTSIDE_IT_INSN
);
17865 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) != SUCCESS
)
17866 first_error (_("invalid instruction shape"));
17872 set_it_insn_type (OUTSIDE_IT_INSN
);
17874 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8
) == SUCCESS
)
17877 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17880 neon_dyadic_misc (NT_untyped
, N_F_16_32
, 0);
17884 do_vrint_1 (enum neon_cvt_mode mode
)
17886 enum neon_shape rs
= neon_select_shape (NS_HH
, NS_FF
, NS_DD
, NS_QQ
, NS_NULL
);
17887 struct neon_type_el et
;
17892 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17893 D register operands. */
17894 if (neon_shape_class
[rs
] == SC_DOUBLE
)
17895 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
17898 et
= neon_check_type (2, rs
, N_EQK
| N_VFP
, N_F_ALL
| N_KEY
17900 if (et
.type
!= NT_invtype
)
17902 /* VFP encodings. */
17903 if (mode
== neon_cvt_mode_a
|| mode
== neon_cvt_mode_n
17904 || mode
== neon_cvt_mode_p
|| mode
== neon_cvt_mode_m
)
17905 set_it_insn_type (OUTSIDE_IT_INSN
);
17907 NEON_ENCODE (FPV8
, inst
);
17908 if (rs
== NS_FF
|| rs
== NS_HH
)
17909 do_vfp_sp_monadic ();
17911 do_vfp_dp_rd_rm ();
17915 case neon_cvt_mode_r
: inst
.instruction
|= 0x00000000; break;
17916 case neon_cvt_mode_z
: inst
.instruction
|= 0x00000080; break;
17917 case neon_cvt_mode_x
: inst
.instruction
|= 0x00010000; break;
17918 case neon_cvt_mode_a
: inst
.instruction
|= 0xf0000000; break;
17919 case neon_cvt_mode_n
: inst
.instruction
|= 0xf0010000; break;
17920 case neon_cvt_mode_p
: inst
.instruction
|= 0xf0020000; break;
17921 case neon_cvt_mode_m
: inst
.instruction
|= 0xf0030000; break;
17925 inst
.instruction
|= (rs
== NS_DD
) << 8;
17926 do_vfp_cond_or_thumb ();
17928 /* ARMv8.2 fp16 vrint instruction. */
17930 do_scalar_fp16_v82_encode ();
17934 /* Neon encodings (or something broken...). */
17936 et
= neon_check_type (2, rs
, N_EQK
, N_F_16_32
| N_KEY
);
17938 if (et
.type
== NT_invtype
)
17941 set_it_insn_type (OUTSIDE_IT_INSN
);
17942 NEON_ENCODE (FLOAT
, inst
);
17944 if (vfp_or_neon_is_neon (NEON_CHECK_CC
| NEON_CHECK_ARCH8
) == FAIL
)
17947 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
17948 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
17949 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
17950 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
17951 inst
.instruction
|= neon_quad (rs
) << 6;
17952 /* Mask off the original size bits and reencode them. */
17953 inst
.instruction
= ((inst
.instruction
& 0xfff3ffff)
17954 | neon_logbits (et
.size
) << 18);
17958 case neon_cvt_mode_z
: inst
.instruction
|= 3 << 7; break;
17959 case neon_cvt_mode_x
: inst
.instruction
|= 1 << 7; break;
17960 case neon_cvt_mode_a
: inst
.instruction
|= 2 << 7; break;
17961 case neon_cvt_mode_n
: inst
.instruction
|= 0 << 7; break;
17962 case neon_cvt_mode_p
: inst
.instruction
|= 7 << 7; break;
17963 case neon_cvt_mode_m
: inst
.instruction
|= 5 << 7; break;
17964 case neon_cvt_mode_r
: inst
.error
= _("invalid rounding mode"); break;
17969 inst
.instruction
|= 0xfc000000;
17971 inst
.instruction
|= 0xf0000000;
17978 do_vrint_1 (neon_cvt_mode_x
);
17984 do_vrint_1 (neon_cvt_mode_z
);
17990 do_vrint_1 (neon_cvt_mode_r
);
17996 do_vrint_1 (neon_cvt_mode_a
);
18002 do_vrint_1 (neon_cvt_mode_n
);
18008 do_vrint_1 (neon_cvt_mode_p
);
18014 do_vrint_1 (neon_cvt_mode_m
);
/* Validate and encode a Dm[x] scalar operand for VCMLA.  For 16-bit
   elements the index must be 0 or 1 and the register below 16; for
   32-bit elements only index 0 is allowed.  Returns the packed
   register/index value, or reports an error and returns 0.  */

static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  if (elsize == 16 && elno < 2 && regno < 16)
    return regno | (elno << 4);
  else if (elsize == 32 && elno == 0)
    return regno;

  first_error (_("scalar out of range"));
  return 0;
}
18035 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18037 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18038 _("expression too complex"));
18039 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18040 constraint (rot
!= 0 && rot
!= 90 && rot
!= 180 && rot
!= 270,
18041 _("immediate out of range"));
18043 if (inst
.operands
[2].isscalar
)
18045 enum neon_shape rs
= neon_select_shape (NS_DDSI
, NS_QQSI
, NS_NULL
);
18046 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18047 N_KEY
| N_F16
| N_F32
).size
;
18048 unsigned m
= neon_scalar_for_vcmla (inst
.operands
[2].reg
, size
);
18050 inst
.instruction
= 0xfe000800;
18051 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18052 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18053 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
) << 16;
18054 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 7;
18055 inst
.instruction
|= LOW4 (m
);
18056 inst
.instruction
|= HI1 (m
) << 5;
18057 inst
.instruction
|= neon_quad (rs
) << 6;
18058 inst
.instruction
|= rot
<< 20;
18059 inst
.instruction
|= (size
== 32) << 23;
18063 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18064 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18065 N_KEY
| N_F16
| N_F32
).size
;
18066 neon_three_same (neon_quad (rs
), 0, -1);
18067 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18068 inst
.instruction
|= 0xfc200800;
18069 inst
.instruction
|= rot
<< 23;
18070 inst
.instruction
|= (size
== 32) << 20;
18077 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18079 constraint (inst
.relocs
[0].exp
.X_op
!= O_constant
,
18080 _("expression too complex"));
18081 unsigned rot
= inst
.relocs
[0].exp
.X_add_number
;
18082 constraint (rot
!= 90 && rot
!= 270, _("immediate out of range"));
18083 enum neon_shape rs
= neon_select_shape (NS_DDDI
, NS_QQQI
, NS_NULL
);
18084 unsigned size
= neon_check_type (3, rs
, N_EQK
, N_EQK
,
18085 N_KEY
| N_F16
| N_F32
).size
;
18086 neon_three_same (neon_quad (rs
), 0, -1);
18087 inst
.instruction
&= 0x00ffffff; /* Undo neon_dp_fixup. */
18088 inst
.instruction
|= 0xfc800800;
18089 inst
.instruction
|= (rot
== 270) << 24;
18090 inst
.instruction
|= (size
== 32) << 20;
18093 /* Dot Product instructions encoding support. */
18096 do_neon_dotproduct (int unsigned_p
)
18098 enum neon_shape rs
;
18099 unsigned scalar_oprd2
= 0;
18102 if (inst
.cond
!= COND_ALWAYS
)
18103 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
18104 "is UNPREDICTABLE"));
18106 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_neon_ext_armv8
),
18109 /* Dot Product instructions are in three-same D/Q register format or the third
18110 operand can be a scalar index register. */
18111 if (inst
.operands
[2].isscalar
)
18113 scalar_oprd2
= neon_scalar_for_mul (inst
.operands
[2].reg
, 32);
18114 high8
= 0xfe000000;
18115 rs
= neon_select_shape (NS_DDS
, NS_QQS
, NS_NULL
);
18119 high8
= 0xfc000000;
18120 rs
= neon_select_shape (NS_DDD
, NS_QQQ
, NS_NULL
);
18124 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_U8
);
18126 neon_check_type (3, rs
, N_EQK
, N_EQK
, N_KEY
| N_S8
);
18128 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
18129 Product instruction, so we pass 0 as the "ubit" parameter. And the
18130 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
18131 neon_three_same (neon_quad (rs
), 0, 32);
18133 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
18134 different NEON three-same encoding. */
18135 inst
.instruction
&= 0x00ffffff;
18136 inst
.instruction
|= high8
;
18137 /* Encode 'U' bit which indicates signedness. */
18138 inst
.instruction
|= (unsigned_p
? 1 : 0) << 4;
18139 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
18140 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
18141 the instruction encoding. */
18142 if (inst
.operands
[2].isscalar
)
18144 inst
.instruction
&= 0xffffffd0;
18145 inst
.instruction
|= LOW4 (scalar_oprd2
);
18146 inst
.instruction
|= HI1 (scalar_oprd2
) << 5;
/* Dot Product instructions for signed integer.  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}
/* Dot Product instructions for unsigned integer.  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
18166 /* Crypto v1 instructions. */
18168 do_crypto_2op_1 (unsigned elttype
, int op
)
18170 set_it_insn_type (OUTSIDE_IT_INSN
);
18172 if (neon_check_type (2, NS_QQ
, N_EQK
| N_UNT
, elttype
| N_UNT
| N_KEY
).type
18178 NEON_ENCODE (INTEGER
, inst
);
18179 inst
.instruction
|= LOW4 (inst
.operands
[0].reg
) << 12;
18180 inst
.instruction
|= HI1 (inst
.operands
[0].reg
) << 22;
18181 inst
.instruction
|= LOW4 (inst
.operands
[1].reg
);
18182 inst
.instruction
|= HI1 (inst
.operands
[1].reg
) << 5;
18184 inst
.instruction
|= op
<< 6;
18187 inst
.instruction
|= 0xfc000000;
18189 inst
.instruction
|= 0xf0000000;
18193 do_crypto_3op_1 (int u
, int op
)
18195 set_it_insn_type (OUTSIDE_IT_INSN
);
18197 if (neon_check_type (3, NS_QQQ
, N_EQK
| N_UNT
, N_EQK
| N_UNT
,
18198 N_32
| N_UNT
| N_KEY
).type
== NT_invtype
)
18203 NEON_ENCODE (INTEGER
, inst
);
18204 neon_three_same (1, u
, 8 << op
);
18210 do_crypto_2op_1 (N_8
, 0);
18216 do_crypto_2op_1 (N_8
, 1);
18222 do_crypto_2op_1 (N_8
, 2);
18228 do_crypto_2op_1 (N_8
, 3);
18234 do_crypto_3op_1 (0, 0);
18240 do_crypto_3op_1 (0, 1);
18246 do_crypto_3op_1 (0, 2);
18252 do_crypto_3op_1 (0, 3);
18258 do_crypto_3op_1 (1, 0);
18264 do_crypto_3op_1 (1, 1);
static void
do_sha256su1 (void)
{
  do_crypto_3op_1 (1, 2);
}
18276 do_crypto_2op_1 (N_32
, -1);
18282 do_crypto_2op_1 (N_32
, 0);
18286 do_sha256su0 (void)
18288 do_crypto_2op_1 (N_32
, 1);
18292 do_crc32_1 (unsigned int poly
, unsigned int sz
)
18294 unsigned int Rd
= inst
.operands
[0].reg
;
18295 unsigned int Rn
= inst
.operands
[1].reg
;
18296 unsigned int Rm
= inst
.operands
[2].reg
;
18298 set_it_insn_type (OUTSIDE_IT_INSN
);
18299 inst
.instruction
|= LOW4 (Rd
) << (thumb_mode
? 8 : 12);
18300 inst
.instruction
|= LOW4 (Rn
) << 16;
18301 inst
.instruction
|= LOW4 (Rm
);
18302 inst
.instruction
|= sz
<< (thumb_mode
? 4 : 21);
18303 inst
.instruction
|= poly
<< (thumb_mode
? 20 : 9);
18305 if (Rd
== REG_PC
|| Rn
== REG_PC
|| Rm
== REG_PC
)
18306 as_warn (UNPRED_REG ("r15"));
18348 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_vfp_ext_armv8
),
18350 neon_check_type (2, NS_FD
, N_S32
, N_F64
);
18351 do_vfp_sp_dp_cvt ();
18352 do_vfp_cond_or_thumb ();
18356 /* Overall per-instruction processing. */
18358 /* We need to be able to fix up arbitrary expressions in some statements.
18359 This is so that we can handle symbols that are an arbitrary distance from
18360 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
18361 which returns part of an address in a form which will be valid for
18362 a data instruction. We do this by pushing the expression into a symbol
18363 in the expr_section, and creating a fix for that. */
18366 fix_new_arm (fragS
* frag
,
18380 /* Create an absolute valued symbol, so we have something to
18381 refer to in the object file. Unfortunately for us, gas's
18382 generic expression parsing will already have folded out
18383 any use of .set foo/.type foo %function that may have
18384 been used to set type information of the target location,
18385 that's being specified symbolically. We have to presume
18386 the user knows what they are doing. */
18390 sprintf (name
, "*ABS*0x%lx", (unsigned long)exp
->X_add_number
);
18392 symbol
= symbol_find_or_make (name
);
18393 S_SET_SEGMENT (symbol
, absolute_section
);
18394 symbol_set_frag (symbol
, &zero_address_frag
);
18395 S_SET_VALUE (symbol
, exp
->X_add_number
);
18396 exp
->X_op
= O_symbol
;
18397 exp
->X_add_symbol
= symbol
;
18398 exp
->X_add_number
= 0;
18404 new_fix
= fix_new_exp (frag
, where
, size
, exp
, pc_rel
,
18405 (enum bfd_reloc_code_real
) reloc
);
18409 new_fix
= (fixS
*) fix_new (frag
, where
, size
, make_expr_symbol (exp
), 0,
18410 pc_rel
, (enum bfd_reloc_code_real
) reloc
);
18414 /* Mark whether the fix is to a THUMB instruction, or an ARM
18416 new_fix
->tc_fix_data
= thumb_mode
;
18419 /* Create a frg for an instruction requiring relaxation. */
18421 output_relax_insn (void)
18427 /* The size of the instruction is unknown, so tie the debug info to the
18428 start of the instruction. */
18429 dwarf2_emit_insn (0);
18431 switch (inst
.relocs
[0].exp
.X_op
)
18434 sym
= inst
.relocs
[0].exp
.X_add_symbol
;
18435 offset
= inst
.relocs
[0].exp
.X_add_number
;
18439 offset
= inst
.relocs
[0].exp
.X_add_number
;
18442 sym
= make_expr_symbol (&inst
.relocs
[0].exp
);
18446 to
= frag_var (rs_machine_dependent
, INSN_SIZE
, THUMB_SIZE
,
18447 inst
.relax
, sym
, offset
, NULL
/*offset, opcode*/);
18448 md_number_to_chars (to
, inst
.instruction
, THUMB_SIZE
);
18451 /* Write a 32-bit thumb instruction to buf. */
18453 put_thumb32_insn (char * buf
, unsigned long insn
)
18455 md_number_to_chars (buf
, insn
>> 16, THUMB_SIZE
);
18456 md_number_to_chars (buf
+ THUMB_SIZE
, insn
, THUMB_SIZE
);
18460 output_inst (const char * str
)
18466 as_bad ("%s -- `%s'", inst
.error
, str
);
18471 output_relax_insn ();
18474 if (inst
.size
== 0)
18477 to
= frag_more (inst
.size
);
18478 /* PR 9814: Record the thumb mode into the current frag so that we know
18479 what type of NOP padding to use, if necessary. We override any previous
18480 setting so that if the mode has changed then the NOPS that we use will
18481 match the encoding of the last instruction in the frag. */
18482 frag_now
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
18484 if (thumb_mode
&& (inst
.size
> THUMB_SIZE
))
18486 gas_assert (inst
.size
== (2 * THUMB_SIZE
));
18487 put_thumb32_insn (to
, inst
.instruction
);
18489 else if (inst
.size
> INSN_SIZE
)
18491 gas_assert (inst
.size
== (2 * INSN_SIZE
));
18492 md_number_to_chars (to
, inst
.instruction
, INSN_SIZE
);
18493 md_number_to_chars (to
+ INSN_SIZE
, inst
.instruction
, INSN_SIZE
);
18496 md_number_to_chars (to
, inst
.instruction
, inst
.size
);
18499 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
18501 if (inst
.relocs
[r
].type
!= BFD_RELOC_UNUSED
)
18502 fix_new_arm (frag_now
, to
- frag_now
->fr_literal
,
18503 inst
.size
, & inst
.relocs
[r
].exp
, inst
.relocs
[r
].pc_rel
,
18504 inst
.relocs
[r
].type
);
18507 dwarf2_emit_insn (inst
.size
);
18511 output_it_inst (int cond
, int mask
, char * to
)
18513 unsigned long instruction
= 0xbf00;
18516 instruction
|= mask
;
18517 instruction
|= cond
<< 4;
18521 to
= frag_more (2);
18523 dwarf2_emit_insn (2);
18527 md_number_to_chars (to
, instruction
, 2);
/* Tag values used in struct asm_opcode's tag field.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a conditional
			   suffix, others place 0xF where the condition field
			   would be.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs.  */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
18566 /* Subroutine of md_assemble, responsible for looking up the primary
18567 opcode from the mnemonic the user wrote. STR points to the
18568 beginning of the mnemonic.
18570 This is not simply a hash table lookup, because of conditional
18571 variants. Most instructions have conditional variants, which are
18572 expressed with a _conditional affix_ to the mnemonic. If we were
18573 to encode each conditional variant as a literal string in the opcode
18574 table, it would have approximately 20,000 entries.
18576 Most mnemonics take this affix as a suffix, and in unified syntax,
18577 'most' is upgraded to 'all'. However, in the divided syntax, some
18578 instructions take the affix as an infix, notably the s-variants of
18579 the arithmetic instructions. Of those instructions, all but six
18580 have the infix appear after the third character of the mnemonic.
18582 Accordingly, the algorithm for looking up primary opcodes given
18585 1. Look up the identifier in the opcode table.
18586 If we find a match, go to step U.
18588 2. Look up the last two characters of the identifier in the
18589 conditions table. If we find a match, look up the first N-2
18590 characters of the identifier in the opcode table. If we
18591 find a match, go to step CE.
18593 3. Look up the fourth and fifth characters of the identifier in
18594 the conditions table. If we find a match, extract those
18595 characters from the identifier, and look up the remaining
18596 characters in the opcode table. If we find a match, go
18601 U. Examine the tag field of the opcode structure, in case this is
18602 one of the six instructions with its conditional infix in an
18603 unusual place. If it is, the tag tells us where to find the
18604 infix; look it up in the conditions table and set inst.cond
18605 accordingly. Otherwise, this is an unconditional instruction.
18606 Again set inst.cond accordingly. Return the opcode structure.
18608 CE. Examine the tag field to make sure this is an instruction that
18609 should receive a conditional suffix. If it is not, fail.
18610 Otherwise, set inst.cond from the suffix we already looked up,
18611 and return the opcode structure.
18613 CM. Examine the tag field to make sure this is an instruction that
18614 should receive a conditional infix after the third character.
18615 If it is not, fail. Otherwise, undo the edits to the current
18616 line of input and proceed as for case CE. */
18618 static const struct asm_opcode
*
18619 opcode_lookup (char **str
)
18623 const struct asm_opcode
*opcode
;
18624 const struct asm_cond
*cond
;
18627 /* Scan up to the end of the mnemonic, which must end in white space,
18628 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
18629 for (base
= end
= *str
; *end
!= '\0'; end
++)
18630 if (*end
== ' ' || *end
== '.')
18636 /* Handle a possible width suffix and/or Neon type suffix. */
18641 /* The .w and .n suffixes are only valid if the unified syntax is in
18643 if (unified_syntax
&& end
[1] == 'w')
18645 else if (unified_syntax
&& end
[1] == 'n')
18650 inst
.vectype
.elems
= 0;
18652 *str
= end
+ offset
;
18654 if (end
[offset
] == '.')
18656 /* See if we have a Neon type suffix (possible in either unified or
18657 non-unified ARM syntax mode). */
18658 if (parse_neon_type (&inst
.vectype
, str
) == FAIL
)
18661 else if (end
[offset
] != '\0' && end
[offset
] != ' ')
18667 /* Look for unaffixed or special-case affixed mnemonic. */
18668 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18673 if (opcode
->tag
< OT_odd_infix_0
)
18675 inst
.cond
= COND_ALWAYS
;
18679 if (warn_on_deprecated
&& unified_syntax
)
18680 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18681 affix
= base
+ (opcode
->tag
- OT_odd_infix_0
);
18682 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18685 inst
.cond
= cond
->value
;
18689 /* Cannot have a conditional suffix on a mnemonic of less than two
18691 if (end
- base
< 3)
18694 /* Look for suffixed mnemonic. */
18696 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18697 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18699 if (opcode
&& cond
)
18702 switch (opcode
->tag
)
18704 case OT_cinfix3_legacy
:
18705 /* Ignore conditional suffixes matched on infix only mnemonics. */
18709 case OT_cinfix3_deprecated
:
18710 case OT_odd_infix_unc
:
18711 if (!unified_syntax
)
18713 /* Fall through. */
18717 case OT_csuf_or_in3
:
18718 inst
.cond
= cond
->value
;
18721 case OT_unconditional
:
18722 case OT_unconditionalF
:
18724 inst
.cond
= cond
->value
;
18727 /* Delayed diagnostic. */
18728 inst
.error
= BAD_COND
;
18729 inst
.cond
= COND_ALWAYS
;
18738 /* Cannot have a usual-position infix on a mnemonic of less than
18739 six characters (five would be a suffix). */
18740 if (end
- base
< 6)
18743 /* Look for infixed mnemonic in the usual position. */
18745 cond
= (const struct asm_cond
*) hash_find_n (arm_cond_hsh
, affix
, 2);
18749 memcpy (save
, affix
, 2);
18750 memmove (affix
, affix
+ 2, (end
- affix
) - 2);
18751 opcode
= (const struct asm_opcode
*) hash_find_n (arm_ops_hsh
, base
,
18753 memmove (affix
+ 2, affix
, (end
- affix
) - 2);
18754 memcpy (affix
, save
, 2);
18757 && (opcode
->tag
== OT_cinfix3
18758 || opcode
->tag
== OT_cinfix3_deprecated
18759 || opcode
->tag
== OT_csuf_or_in3
18760 || opcode
->tag
== OT_cinfix3_legacy
))
18763 if (warn_on_deprecated
&& unified_syntax
18764 && (opcode
->tag
== OT_cinfix3
18765 || opcode
->tag
== OT_cinfix3_deprecated
))
18766 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18768 inst
.cond
= cond
->value
;
18775 /* This function generates an initial IT instruction, leaving its block
18776 virtually open for the new instructions. Eventually,
18777 the mask will be updated by now_it_add_mask () each time
18778 a new instruction needs to be included in the IT block.
18779 Finally, the block is closed with close_automatic_it_block ().
18780 The block closure can be requested either from md_assemble (),
18781 a tencode (), or due to a label hook. */
18784 new_automatic_it_block (int cond
)
18786 now_it
.state
= AUTOMATIC_IT_BLOCK
;
18787 now_it
.mask
= 0x18;
18789 now_it
.block_length
= 1;
18790 mapping_state (MAP_THUMB
);
18791 now_it
.insn
= output_it_inst (cond
, now_it
.mask
, NULL
);
18792 now_it
.warn_deprecated
= FALSE
;
18793 now_it
.insn_cond
= TRUE
;
18796 /* Close an automatic IT block.
18797 See comments in new_automatic_it_block (). */
18800 close_automatic_it_block (void)
18802 now_it
.mask
= 0x10;
18803 now_it
.block_length
= 0;
18806 /* Update the mask of the current automatically-generated IT
18807 instruction. See comments in new_automatic_it_block (). */
18810 now_it_add_mask (int cond
)
18812 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18813 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18814 | ((bitvalue) << (nbit)))
18815 const int resulting_bit
= (cond
& 1);
18817 now_it
.mask
&= 0xf;
18818 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18820 (5 - now_it
.block_length
));
18821 now_it
.mask
= SET_BIT_VALUE (now_it
.mask
,
18823 ((5 - now_it
.block_length
) - 1) );
18824 output_it_inst (now_it
.cc
, now_it
.mask
, now_it
.insn
);
18827 #undef SET_BIT_VALUE
18830 /* The IT blocks handling machinery is accessed through the these functions:
18831 it_fsm_pre_encode () from md_assemble ()
18832 set_it_insn_type () optional, from the tencode functions
18833 set_it_insn_type_last () ditto
18834 in_it_block () ditto
18835 it_fsm_post_encode () from md_assemble ()
18836 force_automatic_it_block_close () from label handling functions
18839 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18840 initializing the IT insn type with a generic initial value depending
18841 on the inst.condition.
18842 2) During the tencode function, two things may happen:
18843 a) The tencode function overrides the IT insn type by
18844 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18845 b) The tencode function queries the IT block state by
18846 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18848 Both set_it_insn_type and in_it_block run the internal FSM state
18849 handling function (handle_it_state), because: a) setting the IT insn
18850 type may incur in an invalid state (exiting the function),
18851 and b) querying the state requires the FSM to be updated.
18852 Specifically we want to avoid creating an IT block for conditional
18853 branches, so it_fsm_pre_encode is actually a guess and we can't
18854 determine whether an IT block is required until the tencode () routine
18855 has decided what type of instruction this actually it.
18856 Because of this, if set_it_insn_type and in_it_block have to be used,
18857 set_it_insn_type has to be called first.
18859 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18860 determines the insn IT type depending on the inst.cond code.
18861 When a tencode () routine encodes an instruction that can be
18862 either outside an IT block, or, in the case of being inside, has to be
18863 the last one, set_it_insn_type_last () will determine the proper
18864 IT instruction type based on the inst.cond code. Otherwise,
18865 set_it_insn_type can be called for overriding that logic or
18866 for covering other cases.
18868 Calling handle_it_state () may not transition the IT block state to
18869 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18870 still queried. Instead, if the FSM determines that the state should
18871 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18872 after the tencode () function: that's what it_fsm_post_encode () does.
18874 Since in_it_block () calls the state handling function to get an
18875 updated state, an error may occur (due to invalid insns combination).
18876 In that case, inst.error is set.
18877 Therefore, inst.error has to be checked after the execution of
18878 the tencode () routine.
18880 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18881 any pending state change (if any) that didn't take place in
18882 handle_it_state () as explained above. */
18885 it_fsm_pre_encode (void)
18887 if (inst
.cond
!= COND_ALWAYS
)
18888 inst
.it_insn_type
= INSIDE_IT_INSN
;
18890 inst
.it_insn_type
= OUTSIDE_IT_INSN
;
18892 now_it
.state_handled
= 0;
18895 /* IT state FSM handling function. */
18898 handle_it_state (void)
18900 now_it
.state_handled
= 1;
18901 now_it
.insn_cond
= FALSE
;
18903 switch (now_it
.state
)
18905 case OUTSIDE_IT_BLOCK
:
18906 switch (inst
.it_insn_type
)
18908 case OUTSIDE_IT_INSN
:
18911 case INSIDE_IT_INSN
:
18912 case INSIDE_IT_LAST_INSN
:
18913 if (thumb_mode
== 0)
18916 && !(implicit_it_mode
& IMPLICIT_IT_MODE_ARM
))
18917 as_tsktsk (_("Warning: conditional outside an IT block"\
18922 if ((implicit_it_mode
& IMPLICIT_IT_MODE_THUMB
)
18923 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
18925 /* Automatically generate the IT instruction. */
18926 new_automatic_it_block (inst
.cond
);
18927 if (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
)
18928 close_automatic_it_block ();
18932 inst
.error
= BAD_OUT_IT
;
18938 case IF_INSIDE_IT_LAST_INSN
:
18939 case NEUTRAL_IT_INSN
:
18943 now_it
.state
= MANUAL_IT_BLOCK
;
18944 now_it
.block_length
= 0;
18949 case AUTOMATIC_IT_BLOCK
:
18950 /* Three things may happen now:
18951 a) We should increment current it block size;
18952 b) We should close current it block (closing insn or 4 insns);
18953 c) We should close current it block and start a new one (due
18954 to incompatible conditions or
18955 4 insns-length block reached). */
18957 switch (inst
.it_insn_type
)
18959 case OUTSIDE_IT_INSN
:
18960 /* The closure of the block shall happen immediately,
18961 so any in_it_block () call reports the block as closed. */
18962 force_automatic_it_block_close ();
18965 case INSIDE_IT_INSN
:
18966 case INSIDE_IT_LAST_INSN
:
18967 case IF_INSIDE_IT_LAST_INSN
:
18968 now_it
.block_length
++;
18970 if (now_it
.block_length
> 4
18971 || !now_it_compatible (inst
.cond
))
18973 force_automatic_it_block_close ();
18974 if (inst
.it_insn_type
!= IF_INSIDE_IT_LAST_INSN
)
18975 new_automatic_it_block (inst
.cond
);
18979 now_it
.insn_cond
= TRUE
;
18980 now_it_add_mask (inst
.cond
);
18983 if (now_it
.state
== AUTOMATIC_IT_BLOCK
18984 && (inst
.it_insn_type
== INSIDE_IT_LAST_INSN
18985 || inst
.it_insn_type
== IF_INSIDE_IT_LAST_INSN
))
18986 close_automatic_it_block ();
18989 case NEUTRAL_IT_INSN
:
18990 now_it
.block_length
++;
18991 now_it
.insn_cond
= TRUE
;
18993 if (now_it
.block_length
> 4)
18994 force_automatic_it_block_close ();
18996 now_it_add_mask (now_it
.cc
& 1);
19000 close_automatic_it_block ();
19001 now_it
.state
= MANUAL_IT_BLOCK
;
19006 case MANUAL_IT_BLOCK
:
19008 /* Check conditional suffixes. */
19009 const int cond
= now_it
.cc
^ ((now_it
.mask
>> 4) & 1) ^ 1;
19012 now_it
.mask
&= 0x1f;
19013 is_last
= (now_it
.mask
== 0x10);
19014 now_it
.insn_cond
= TRUE
;
19016 switch (inst
.it_insn_type
)
19018 case OUTSIDE_IT_INSN
:
19019 inst
.error
= BAD_NOT_IT
;
19022 case INSIDE_IT_INSN
:
19023 if (cond
!= inst
.cond
)
19025 inst
.error
= BAD_IT_COND
;
19030 case INSIDE_IT_LAST_INSN
:
19031 case IF_INSIDE_IT_LAST_INSN
:
19032 if (cond
!= inst
.cond
)
19034 inst
.error
= BAD_IT_COND
;
19039 inst
.error
= BAD_BRANCH
;
19044 case NEUTRAL_IT_INSN
:
19045 /* The BKPT instruction is unconditional even in an IT block. */
19049 inst
.error
= BAD_IT_IT
;
19059 struct depr_insn_mask
19061 unsigned long pattern
;
19062 unsigned long mask
;
19063 const char* description
;
19066 /* List of 16-bit instruction patterns deprecated in an IT block in
19068 static const struct depr_insn_mask depr_it_insns
[] = {
19069 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
19070 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
19071 { 0xa000, 0xb800, N_("ADR") },
19072 { 0x4800, 0xf800, N_("Literal loads") },
19073 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
19074 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
19075 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
19076 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
19077 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
19082 it_fsm_post_encode (void)
19086 if (!now_it
.state_handled
)
19087 handle_it_state ();
19089 if (now_it
.insn_cond
19090 && !now_it
.warn_deprecated
19091 && warn_on_deprecated
19092 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v8
)
19093 && !ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_m
))
19095 if (inst
.instruction
>= 0x10000)
19097 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
19098 "performance deprecated in ARMv8-A and ARMv8-R"));
19099 now_it
.warn_deprecated
= TRUE
;
19103 const struct depr_insn_mask
*p
= depr_it_insns
;
19105 while (p
->mask
!= 0)
19107 if ((inst
.instruction
& p
->mask
) == p
->pattern
)
19109 as_tsktsk (_("IT blocks containing 16-bit Thumb "
19110 "instructions of the following class are "
19111 "performance deprecated in ARMv8-A and "
19112 "ARMv8-R: %s"), p
->description
);
19113 now_it
.warn_deprecated
= TRUE
;
19121 if (now_it
.block_length
> 1)
19123 as_tsktsk (_("IT blocks containing more than one conditional "
19124 "instruction are performance deprecated in ARMv8-A and "
19126 now_it
.warn_deprecated
= TRUE
;
19130 is_last
= (now_it
.mask
== 0x10);
19133 now_it
.state
= OUTSIDE_IT_BLOCK
;
19139 force_automatic_it_block_close (void)
19141 if (now_it
.state
== AUTOMATIC_IT_BLOCK
)
19143 close_automatic_it_block ();
19144 now_it
.state
= OUTSIDE_IT_BLOCK
;
19152 if (!now_it
.state_handled
)
19153 handle_it_state ();
19155 return now_it
.state
!= OUTSIDE_IT_BLOCK
;
19158 /* Whether OPCODE only has T32 encoding. Since this function is only used by
19159 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
19160 here, hence the "known" in the function name. */
19163 known_t32_only_insn (const struct asm_opcode
*opcode
)
19165 /* Original Thumb-1 wide instruction. */
19166 if (opcode
->tencode
== do_t_blx
19167 || opcode
->tencode
== do_t_branch23
19168 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_msr
)
19169 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_barrier
))
19172 /* Wide-only instruction added to ARMv8-M Baseline. */
19173 if (ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v8m_m_only
)
19174 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_atomics
)
19175 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_v6t2_v8m
)
19176 || ARM_CPU_HAS_FEATURE (*opcode
->tvariant
, arm_ext_div
))
19182 /* Whether wide instruction variant can be used if available for a valid OPCODE
19186 t32_insn_ok (arm_feature_set arch
, const struct asm_opcode
*opcode
)
19188 if (known_t32_only_insn (opcode
))
19191 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
19192 of variant T3 of B.W is checked in do_t_branch. */
19193 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19194 && opcode
->tencode
== do_t_branch
)
19197 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
19198 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v8m
)
19199 && opcode
->tencode
== do_t_mov_cmp
19200 /* Make sure CMP instruction is not affected. */
19201 && opcode
->aencode
== do_mov
)
19204 /* Wide instruction variants of all instructions with narrow *and* wide
19205 variants become available with ARMv6t2. Other opcodes are either
19206 narrow-only or wide-only and are thus available if OPCODE is valid. */
19207 if (ARM_CPU_HAS_FEATURE (arch
, arm_ext_v6t2
))
19210 /* OPCODE with narrow only instruction variant or wide variant not
19216 md_assemble (char *str
)
19219 const struct asm_opcode
* opcode
;
19221 /* Align the previous label if needed. */
19222 if (last_label_seen
!= NULL
)
19224 symbol_set_frag (last_label_seen
, frag_now
);
19225 S_SET_VALUE (last_label_seen
, (valueT
) frag_now_fix ());
19226 S_SET_SEGMENT (last_label_seen
, now_seg
);
19229 memset (&inst
, '\0', sizeof (inst
));
19231 for (r
= 0; r
< ARM_IT_MAX_RELOCS
; r
++)
19232 inst
.relocs
[r
].type
= BFD_RELOC_UNUSED
;
19234 opcode
= opcode_lookup (&p
);
19237 /* It wasn't an instruction, but it might be a register alias of
19238 the form alias .req reg, or a Neon .dn/.qn directive. */
19239 if (! create_register_alias (str
, p
)
19240 && ! create_neon_reg_alias (str
, p
))
19241 as_bad (_("bad instruction `%s'"), str
);
19246 if (warn_on_deprecated
&& opcode
->tag
== OT_cinfix3_deprecated
)
19247 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
19249 /* The value which unconditional instructions should have in place of the
19250 condition field. */
19251 inst
.uncond_value
= (opcode
->tag
== OT_csuffixF
) ? 0xf : -1;
19255 arm_feature_set variant
;
19257 variant
= cpu_variant
;
19258 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
19259 if (!ARM_CPU_HAS_FEATURE (variant
, arm_arch_t2
))
19260 ARM_CLEAR_FEATURE (variant
, variant
, fpu_any_hard
);
19261 /* Check that this instruction is supported for this CPU. */
19262 if (!opcode
->tvariant
19263 || (thumb_mode
== 1
19264 && !ARM_CPU_HAS_FEATURE (variant
, *opcode
->tvariant
)))
19266 if (opcode
->tencode
== do_t_swi
)
19267 as_bad (_("SVC is not permitted on this architecture"));
19269 as_bad (_("selected processor does not support `%s' in Thumb mode"), str
);
19272 if (inst
.cond
!= COND_ALWAYS
&& !unified_syntax
19273 && opcode
->tencode
!= do_t_branch
)
19275 as_bad (_("Thumb does not support conditional execution"));
19279 /* Two things are addressed here:
19280 1) Implicit require narrow instructions on Thumb-1.
19281 This avoids relaxation accidentally introducing Thumb-2
19283 2) Reject wide instructions in non Thumb-2 cores.
19285 Only instructions with narrow and wide variants need to be handled
19286 but selecting all non wide-only instructions is easier. */
19287 if (!ARM_CPU_HAS_FEATURE (variant
, arm_ext_v6t2
)
19288 && !t32_insn_ok (variant
, opcode
))
19290 if (inst
.size_req
== 0)
19292 else if (inst
.size_req
== 4)
19294 if (ARM_CPU_HAS_FEATURE (variant
, arm_ext_v8m
))
19295 as_bad (_("selected processor does not support 32bit wide "
19296 "variant of instruction `%s'"), str
);
19298 as_bad (_("selected processor does not support `%s' in "
19299 "Thumb-2 mode"), str
);
19304 inst
.instruction
= opcode
->tvalue
;
19306 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/TRUE
))
19308 /* Prepare the it_insn_type for those encodings that don't set
19310 it_fsm_pre_encode ();
19312 opcode
->tencode ();
19314 it_fsm_post_encode ();
19317 if (!(inst
.error
|| inst
.relax
))
19319 gas_assert (inst
.instruction
< 0xe800 || inst
.instruction
> 0xffff);
19320 inst
.size
= (inst
.instruction
> 0xffff ? 4 : 2);
19321 if (inst
.size_req
&& inst
.size_req
!= inst
.size
)
19323 as_bad (_("cannot honor width suffix -- `%s'"), str
);
19328 /* Something has gone badly wrong if we try to relax a fixed size
19330 gas_assert (inst
.size_req
== 0 || !inst
.relax
);
19332 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19333 *opcode
->tvariant
);
19334 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
19335 set those bits when Thumb-2 32-bit instructions are seen. The impact
19336 of relaxable instructions will be considered later after we finish all
19338 if (ARM_FEATURE_CORE_EQUAL (cpu_variant
, arm_arch_any
))
19339 variant
= arm_arch_none
;
19341 variant
= cpu_variant
;
19342 if (inst
.size
== 4 && !t32_insn_ok (variant
, opcode
))
19343 ARM_MERGE_FEATURE_SETS (thumb_arch_used
, thumb_arch_used
,
19346 check_neon_suffixes
;
19350 mapping_state (MAP_THUMB
);
19353 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
19357 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
19358 is_bx
= (opcode
->aencode
== do_bx
);
19360 /* Check that this instruction is supported for this CPU. */
19361 if (!(is_bx
&& fix_v4bx
)
19362 && !(opcode
->avariant
&&
19363 ARM_CPU_HAS_FEATURE (cpu_variant
, *opcode
->avariant
)))
19365 as_bad (_("selected processor does not support `%s' in ARM mode"), str
);
19370 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str
);
19374 inst
.instruction
= opcode
->avalue
;
19375 if (opcode
->tag
== OT_unconditionalF
)
19376 inst
.instruction
|= 0xFU
<< 28;
19378 inst
.instruction
|= inst
.cond
<< 28;
19379 inst
.size
= INSN_SIZE
;
19380 if (!parse_operands (p
, opcode
->operands
, /*thumb=*/FALSE
))
19382 it_fsm_pre_encode ();
19383 opcode
->aencode ();
19384 it_fsm_post_encode ();
19386 /* Arm mode bx is marked as both v4T and v5 because it's still required
19387 on a hypothetical non-thumb v5 core. */
19389 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
, arm_ext_v4t
);
19391 ARM_MERGE_FEATURE_SETS (arm_arch_used
, arm_arch_used
,
19392 *opcode
->avariant
);
19394 check_neon_suffixes
;
19398 mapping_state (MAP_ARM
);
19403 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
19411 check_it_blocks_finished (void)
19416 for (sect
= stdoutput
->sections
; sect
!= NULL
; sect
= sect
->next
)
19417 if (seg_info (sect
)->tc_segment_info_data
.current_it
.state
19418 == MANUAL_IT_BLOCK
)
19420 as_warn (_("section '%s' finished with an open IT block."),
19424 if (now_it
.state
== MANUAL_IT_BLOCK
)
19425 as_warn (_("file finished with an open IT block."));
19429 /* Various frobbings of labels and their addresses. */
19432 arm_start_line_hook (void)
19434 last_label_seen
= NULL
;
19438 arm_frob_label (symbolS
* sym
)
19440 last_label_seen
= sym
;
19442 ARM_SET_THUMB (sym
, thumb_mode
);
19444 #if defined OBJ_COFF || defined OBJ_ELF
19445 ARM_SET_INTERWORK (sym
, support_interwork
);
19448 force_automatic_it_block_close ();
19450 /* Note - do not allow local symbols (.Lxxx) to be labelled
19451 as Thumb functions. This is because these labels, whilst
19452 they exist inside Thumb code, are not the entry points for
19453 possible ARM->Thumb calls. Also, these labels can be used
19454 as part of a computed goto or switch statement. eg gcc
19455 can generate code that looks like this:
19457 ldr r2, [pc, .Laaa]
19467 The first instruction loads the address of the jump table.
19468 The second instruction converts a table index into a byte offset.
19469 The third instruction gets the jump address out of the table.
19470 The fourth instruction performs the jump.
19472 If the address stored at .Laaa is that of a symbol which has the
19473 Thumb_Func bit set, then the linker will arrange for this address
19474 to have the bottom bit set, which in turn would mean that the
19475 address computation performed by the third instruction would end
19476 up with the bottom bit set. Since the ARM is capable of unaligned
19477 word loads, the instruction would then load the incorrect address
19478 out of the jump table, and chaos would ensue. */
19479 if (label_is_thumb_function_name
19480 && (S_GET_NAME (sym
)[0] != '.' || S_GET_NAME (sym
)[1] != 'L')
19481 && (bfd_get_section_flags (stdoutput
, now_seg
) & SEC_CODE
) != 0)
19483 /* When the address of a Thumb function is taken the bottom
19484 bit of that address should be set. This will allow
19485 interworking between Arm and Thumb functions to work
19488 THUMB_SET_FUNC (sym
, 1);
19490 label_is_thumb_function_name
= FALSE
;
19493 dwarf2_emit_label (sym
);
19497 arm_data_in_code (void)
19499 if (thumb_mode
&& ! strncmp (input_line_pointer
+ 1, "data:", 5))
19501 *input_line_pointer
= '/';
19502 input_line_pointer
+= 5;
19503 *input_line_pointer
= 0;
19511 arm_canonicalize_symbol_name (char * name
)
19515 if (thumb_mode
&& (len
= strlen (name
)) > 5
19516 && streq (name
+ len
- 5, "/data"))
19517 *(name
+ len
- 5) = 0;
19522 /* Table of all register names defined by default. The user can
19523 define additional names with .req. Note that all register names
19524 should appear in both upper and lowercase variants. Some registers
19525 also have mixed-case names. */
19527 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
19528 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
19529 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
19530 #define REGSET(p,t) \
19531 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
19532 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
19533 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
19534 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
19535 #define REGSETH(p,t) \
19536 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
19537 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
19538 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
19539 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
19540 #define REGSET2(p,t) \
19541 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
19542 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
19543 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
19544 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
19545 #define SPLRBANK(base,bank,t) \
19546 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
19547 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
19548 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
19549 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
19550 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
19551 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19553 static const struct reg_entry reg_names
[] =
19555 /* ARM integer registers. */
19556 REGSET(r
, RN
), REGSET(R
, RN
),
19558 /* ATPCS synonyms. */
19559 REGDEF(a1
,0,RN
), REGDEF(a2
,1,RN
), REGDEF(a3
, 2,RN
), REGDEF(a4
, 3,RN
),
19560 REGDEF(v1
,4,RN
), REGDEF(v2
,5,RN
), REGDEF(v3
, 6,RN
), REGDEF(v4
, 7,RN
),
19561 REGDEF(v5
,8,RN
), REGDEF(v6
,9,RN
), REGDEF(v7
,10,RN
), REGDEF(v8
,11,RN
),
19563 REGDEF(A1
,0,RN
), REGDEF(A2
,1,RN
), REGDEF(A3
, 2,RN
), REGDEF(A4
, 3,RN
),
19564 REGDEF(V1
,4,RN
), REGDEF(V2
,5,RN
), REGDEF(V3
, 6,RN
), REGDEF(V4
, 7,RN
),
19565 REGDEF(V5
,8,RN
), REGDEF(V6
,9,RN
), REGDEF(V7
,10,RN
), REGDEF(V8
,11,RN
),
19567 /* Well-known aliases. */
19568 REGDEF(wr
, 7,RN
), REGDEF(sb
, 9,RN
), REGDEF(sl
,10,RN
), REGDEF(fp
,11,RN
),
19569 REGDEF(ip
,12,RN
), REGDEF(sp
,13,RN
), REGDEF(lr
,14,RN
), REGDEF(pc
,15,RN
),
19571 REGDEF(WR
, 7,RN
), REGDEF(SB
, 9,RN
), REGDEF(SL
,10,RN
), REGDEF(FP
,11,RN
),
19572 REGDEF(IP
,12,RN
), REGDEF(SP
,13,RN
), REGDEF(LR
,14,RN
), REGDEF(PC
,15,RN
),
19574 /* Coprocessor numbers. */
19575 REGSET(p
, CP
), REGSET(P
, CP
),
19577 /* Coprocessor register numbers. The "cr" variants are for backward
19579 REGSET(c
, CN
), REGSET(C
, CN
),
19580 REGSET(cr
, CN
), REGSET(CR
, CN
),
19582 /* ARM banked registers. */
19583 REGDEF(R8_usr
,512|(0<<16),RNB
), REGDEF(r8_usr
,512|(0<<16),RNB
),
19584 REGDEF(R9_usr
,512|(1<<16),RNB
), REGDEF(r9_usr
,512|(1<<16),RNB
),
19585 REGDEF(R10_usr
,512|(2<<16),RNB
), REGDEF(r10_usr
,512|(2<<16),RNB
),
19586 REGDEF(R11_usr
,512|(3<<16),RNB
), REGDEF(r11_usr
,512|(3<<16),RNB
),
19587 REGDEF(R12_usr
,512|(4<<16),RNB
), REGDEF(r12_usr
,512|(4<<16),RNB
),
19588 REGDEF(SP_usr
,512|(5<<16),RNB
), REGDEF(sp_usr
,512|(5<<16),RNB
),
19589 REGDEF(LR_usr
,512|(6<<16),RNB
), REGDEF(lr_usr
,512|(6<<16),RNB
),
19591 REGDEF(R8_fiq
,512|(8<<16),RNB
), REGDEF(r8_fiq
,512|(8<<16),RNB
),
19592 REGDEF(R9_fiq
,512|(9<<16),RNB
), REGDEF(r9_fiq
,512|(9<<16),RNB
),
19593 REGDEF(R10_fiq
,512|(10<<16),RNB
), REGDEF(r10_fiq
,512|(10<<16),RNB
),
19594 REGDEF(R11_fiq
,512|(11<<16),RNB
), REGDEF(r11_fiq
,512|(11<<16),RNB
),
19595 REGDEF(R12_fiq
,512|(12<<16),RNB
), REGDEF(r12_fiq
,512|(12<<16),RNB
),
19596 REGDEF(SP_fiq
,512|(13<<16),RNB
), REGDEF(sp_fiq
,512|(13<<16),RNB
),
19597 REGDEF(LR_fiq
,512|(14<<16),RNB
), REGDEF(lr_fiq
,512|(14<<16),RNB
),
19598 REGDEF(SPSR_fiq
,512|(14<<16)|SPSR_BIT
,RNB
), REGDEF(spsr_fiq
,512|(14<<16)|SPSR_BIT
,RNB
),
19600 SPLRBANK(0,IRQ
,RNB
), SPLRBANK(0,irq
,RNB
),
19601 SPLRBANK(2,SVC
,RNB
), SPLRBANK(2,svc
,RNB
),
19602 SPLRBANK(4,ABT
,RNB
), SPLRBANK(4,abt
,RNB
),
19603 SPLRBANK(6,UND
,RNB
), SPLRBANK(6,und
,RNB
),
19604 SPLRBANK(12,MON
,RNB
), SPLRBANK(12,mon
,RNB
),
19605 REGDEF(elr_hyp
,768|(14<<16),RNB
), REGDEF(ELR_hyp
,768|(14<<16),RNB
),
19606 REGDEF(sp_hyp
,768|(15<<16),RNB
), REGDEF(SP_hyp
,768|(15<<16),RNB
),
19607 REGDEF(spsr_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19608 REGDEF(SPSR_hyp
,768|(14<<16)|SPSR_BIT
,RNB
),
19610 /* FPA registers. */
19611 REGNUM(f
,0,FN
), REGNUM(f
,1,FN
), REGNUM(f
,2,FN
), REGNUM(f
,3,FN
),
19612 REGNUM(f
,4,FN
), REGNUM(f
,5,FN
), REGNUM(f
,6,FN
), REGNUM(f
,7, FN
),
19614 REGNUM(F
,0,FN
), REGNUM(F
,1,FN
), REGNUM(F
,2,FN
), REGNUM(F
,3,FN
),
19615 REGNUM(F
,4,FN
), REGNUM(F
,5,FN
), REGNUM(F
,6,FN
), REGNUM(F
,7, FN
),
19617 /* VFP SP registers. */
19618 REGSET(s
,VFS
), REGSET(S
,VFS
),
19619 REGSETH(s
,VFS
), REGSETH(S
,VFS
),
19621 /* VFP DP Registers. */
19622 REGSET(d
,VFD
), REGSET(D
,VFD
),
19623 /* Extra Neon DP registers. */
19624 REGSETH(d
,VFD
), REGSETH(D
,VFD
),
19626 /* Neon QP registers. */
19627 REGSET2(q
,NQ
), REGSET2(Q
,NQ
),
19629 /* VFP control registers. */
19630 REGDEF(fpsid
,0,VFC
), REGDEF(fpscr
,1,VFC
), REGDEF(fpexc
,8,VFC
),
19631 REGDEF(FPSID
,0,VFC
), REGDEF(FPSCR
,1,VFC
), REGDEF(FPEXC
,8,VFC
),
19632 REGDEF(fpinst
,9,VFC
), REGDEF(fpinst2
,10,VFC
),
19633 REGDEF(FPINST
,9,VFC
), REGDEF(FPINST2
,10,VFC
),
19634 REGDEF(mvfr0
,7,VFC
), REGDEF(mvfr1
,6,VFC
),
19635 REGDEF(MVFR0
,7,VFC
), REGDEF(MVFR1
,6,VFC
),
19636 REGDEF(mvfr2
,5,VFC
), REGDEF(MVFR2
,5,VFC
),
19638 /* Maverick DSP coprocessor registers. */
19639 REGSET(mvf
,MVF
), REGSET(mvd
,MVD
), REGSET(mvfx
,MVFX
), REGSET(mvdx
,MVDX
),
19640 REGSET(MVF
,MVF
), REGSET(MVD
,MVD
), REGSET(MVFX
,MVFX
), REGSET(MVDX
,MVDX
),
19642 REGNUM(mvax
,0,MVAX
), REGNUM(mvax
,1,MVAX
),
19643 REGNUM(mvax
,2,MVAX
), REGNUM(mvax
,3,MVAX
),
19644 REGDEF(dspsc
,0,DSPSC
),
19646 REGNUM(MVAX
,0,MVAX
), REGNUM(MVAX
,1,MVAX
),
19647 REGNUM(MVAX
,2,MVAX
), REGNUM(MVAX
,3,MVAX
),
19648 REGDEF(DSPSC
,0,DSPSC
),
19650 /* iWMMXt data registers - p0, c0-15. */
19651 REGSET(wr
,MMXWR
), REGSET(wR
,MMXWR
), REGSET(WR
, MMXWR
),
19653 /* iWMMXt control registers - p1, c0-3. */
19654 REGDEF(wcid
, 0,MMXWC
), REGDEF(wCID
, 0,MMXWC
), REGDEF(WCID
, 0,MMXWC
),
19655 REGDEF(wcon
, 1,MMXWC
), REGDEF(wCon
, 1,MMXWC
), REGDEF(WCON
, 1,MMXWC
),
19656 REGDEF(wcssf
, 2,MMXWC
), REGDEF(wCSSF
, 2,MMXWC
), REGDEF(WCSSF
, 2,MMXWC
),
19657 REGDEF(wcasf
, 3,MMXWC
), REGDEF(wCASF
, 3,MMXWC
), REGDEF(WCASF
, 3,MMXWC
),
19659 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19660 REGDEF(wcgr0
, 8,MMXWCG
), REGDEF(wCGR0
, 8,MMXWCG
), REGDEF(WCGR0
, 8,MMXWCG
),
19661 REGDEF(wcgr1
, 9,MMXWCG
), REGDEF(wCGR1
, 9,MMXWCG
), REGDEF(WCGR1
, 9,MMXWCG
),
19662 REGDEF(wcgr2
,10,MMXWCG
), REGDEF(wCGR2
,10,MMXWCG
), REGDEF(WCGR2
,10,MMXWCG
),
19663 REGDEF(wcgr3
,11,MMXWCG
), REGDEF(wCGR3
,11,MMXWCG
), REGDEF(WCGR3
,11,MMXWCG
),
19665 /* XScale accumulator registers. */
19666 REGNUM(acc
,0,XSCALE
), REGNUM(ACC
,0,XSCALE
),
19672 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
19673 within psr_required_here. */
19674 static const struct asm_psr psrs
[] =
19676 /* Backward compatibility notation. Note that "all" is no longer
19677 truly all possible PSR bits. */
19678 {"all", PSR_c
| PSR_f
},
19682 /* Individual flags. */
19688 /* Combinations of flags. */
19689 {"fs", PSR_f
| PSR_s
},
19690 {"fx", PSR_f
| PSR_x
},
19691 {"fc", PSR_f
| PSR_c
},
19692 {"sf", PSR_s
| PSR_f
},
19693 {"sx", PSR_s
| PSR_x
},
19694 {"sc", PSR_s
| PSR_c
},
19695 {"xf", PSR_x
| PSR_f
},
19696 {"xs", PSR_x
| PSR_s
},
19697 {"xc", PSR_x
| PSR_c
},
19698 {"cf", PSR_c
| PSR_f
},
19699 {"cs", PSR_c
| PSR_s
},
19700 {"cx", PSR_c
| PSR_x
},
19701 {"fsx", PSR_f
| PSR_s
| PSR_x
},
19702 {"fsc", PSR_f
| PSR_s
| PSR_c
},
19703 {"fxs", PSR_f
| PSR_x
| PSR_s
},
19704 {"fxc", PSR_f
| PSR_x
| PSR_c
},
19705 {"fcs", PSR_f
| PSR_c
| PSR_s
},
19706 {"fcx", PSR_f
| PSR_c
| PSR_x
},
19707 {"sfx", PSR_s
| PSR_f
| PSR_x
},
19708 {"sfc", PSR_s
| PSR_f
| PSR_c
},
19709 {"sxf", PSR_s
| PSR_x
| PSR_f
},
19710 {"sxc", PSR_s
| PSR_x
| PSR_c
},
19711 {"scf", PSR_s
| PSR_c
| PSR_f
},
19712 {"scx", PSR_s
| PSR_c
| PSR_x
},
19713 {"xfs", PSR_x
| PSR_f
| PSR_s
},
19714 {"xfc", PSR_x
| PSR_f
| PSR_c
},
19715 {"xsf", PSR_x
| PSR_s
| PSR_f
},
19716 {"xsc", PSR_x
| PSR_s
| PSR_c
},
19717 {"xcf", PSR_x
| PSR_c
| PSR_f
},
19718 {"xcs", PSR_x
| PSR_c
| PSR_s
},
19719 {"cfs", PSR_c
| PSR_f
| PSR_s
},
19720 {"cfx", PSR_c
| PSR_f
| PSR_x
},
19721 {"csf", PSR_c
| PSR_s
| PSR_f
},
19722 {"csx", PSR_c
| PSR_s
| PSR_x
},
19723 {"cxf", PSR_c
| PSR_x
| PSR_f
},
19724 {"cxs", PSR_c
| PSR_x
| PSR_s
},
19725 {"fsxc", PSR_f
| PSR_s
| PSR_x
| PSR_c
},
19726 {"fscx", PSR_f
| PSR_s
| PSR_c
| PSR_x
},
19727 {"fxsc", PSR_f
| PSR_x
| PSR_s
| PSR_c
},
19728 {"fxcs", PSR_f
| PSR_x
| PSR_c
| PSR_s
},
19729 {"fcsx", PSR_f
| PSR_c
| PSR_s
| PSR_x
},
19730 {"fcxs", PSR_f
| PSR_c
| PSR_x
| PSR_s
},
19731 {"sfxc", PSR_s
| PSR_f
| PSR_x
| PSR_c
},
19732 {"sfcx", PSR_s
| PSR_f
| PSR_c
| PSR_x
},
19733 {"sxfc", PSR_s
| PSR_x
| PSR_f
| PSR_c
},
19734 {"sxcf", PSR_s
| PSR_x
| PSR_c
| PSR_f
},
19735 {"scfx", PSR_s
| PSR_c
| PSR_f
| PSR_x
},
19736 {"scxf", PSR_s
| PSR_c
| PSR_x
| PSR_f
},
19737 {"xfsc", PSR_x
| PSR_f
| PSR_s
| PSR_c
},
19738 {"xfcs", PSR_x
| PSR_f
| PSR_c
| PSR_s
},
19739 {"xsfc", PSR_x
| PSR_s
| PSR_f
| PSR_c
},
19740 {"xscf", PSR_x
| PSR_s
| PSR_c
| PSR_f
},
19741 {"xcfs", PSR_x
| PSR_c
| PSR_f
| PSR_s
},
19742 {"xcsf", PSR_x
| PSR_c
| PSR_s
| PSR_f
},
19743 {"cfsx", PSR_c
| PSR_f
| PSR_s
| PSR_x
},
19744 {"cfxs", PSR_c
| PSR_f
| PSR_x
| PSR_s
},
19745 {"csfx", PSR_c
| PSR_s
| PSR_f
| PSR_x
},
19746 {"csxf", PSR_c
| PSR_s
| PSR_x
| PSR_f
},
19747 {"cxfs", PSR_c
| PSR_x
| PSR_f
| PSR_s
},
19748 {"cxsf", PSR_c
| PSR_x
| PSR_s
| PSR_f
},
19751 /* Table of V7M psr names. */
19752 static const struct asm_psr v7m_psrs
[] =
19754 {"apsr", 0x0 }, {"APSR", 0x0 },
19755 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19756 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19757 {"psr", 0x3 }, {"PSR", 0x3 },
19758 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19759 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19760 {"epsr", 0x6 }, {"EPSR", 0x6 },
19761 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19762 {"msp", 0x8 }, {"MSP", 0x8 },
19763 {"psp", 0x9 }, {"PSP", 0x9 },
19764 {"msplim", 0xa }, {"MSPLIM", 0xa },
19765 {"psplim", 0xb }, {"PSPLIM", 0xb },
19766 {"primask", 0x10}, {"PRIMASK", 0x10},
19767 {"basepri", 0x11}, {"BASEPRI", 0x11},
19768 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19769 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19770 {"control", 0x14}, {"CONTROL", 0x14},
19771 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19772 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19773 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19774 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19775 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19776 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19777 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19778 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19779 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19782 /* Table of all shift-in-operand names. */
19783 static const struct asm_shift_name shift_names
[] =
19785 { "asl", SHIFT_LSL
}, { "ASL", SHIFT_LSL
},
19786 { "lsl", SHIFT_LSL
}, { "LSL", SHIFT_LSL
},
19787 { "lsr", SHIFT_LSR
}, { "LSR", SHIFT_LSR
},
19788 { "asr", SHIFT_ASR
}, { "ASR", SHIFT_ASR
},
19789 { "ror", SHIFT_ROR
}, { "ROR", SHIFT_ROR
},
19790 { "rrx", SHIFT_RRX
}, { "RRX", SHIFT_RRX
}
19793 /* Table of all explicit relocation names. */
19795 static struct reloc_entry reloc_names
[] =
19797 { "got", BFD_RELOC_ARM_GOT32
}, { "GOT", BFD_RELOC_ARM_GOT32
},
19798 { "gotoff", BFD_RELOC_ARM_GOTOFF
}, { "GOTOFF", BFD_RELOC_ARM_GOTOFF
},
19799 { "plt", BFD_RELOC_ARM_PLT32
}, { "PLT", BFD_RELOC_ARM_PLT32
},
19800 { "target1", BFD_RELOC_ARM_TARGET1
}, { "TARGET1", BFD_RELOC_ARM_TARGET1
},
19801 { "target2", BFD_RELOC_ARM_TARGET2
}, { "TARGET2", BFD_RELOC_ARM_TARGET2
},
19802 { "sbrel", BFD_RELOC_ARM_SBREL32
}, { "SBREL", BFD_RELOC_ARM_SBREL32
},
19803 { "tlsgd", BFD_RELOC_ARM_TLS_GD32
}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32
},
19804 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32
}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32
},
19805 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32
}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32
},
19806 { "gottpoff",BFD_RELOC_ARM_TLS_IE32
}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32
},
19807 { "tpoff", BFD_RELOC_ARM_TLS_LE32
}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32
},
19808 { "got_prel", BFD_RELOC_ARM_GOT_PREL
}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL
},
19809 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC
},
19810 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC
},
19811 { "tlscall", BFD_RELOC_ARM_TLS_CALL
},
19812 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL
},
19813 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ
},
19814 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ
},
19815 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC
},
19816 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC
},
19817 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19818 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC
},
19819 { "funcdesc", BFD_RELOC_ARM_FUNCDESC
},
19820 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC
},
19821 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC
}, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC
},
19822 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC
}, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC
},
19823 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC
}, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC
},
19827 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* NOTE(review): this table is truncated in this extraction -- the entries
   before "cs" (original lines 19829-19831, e.g. the opening brace and the
   "eq"/"ne" entries) and everything after "lo" (original lines 19834-19845,
   e.g. "mi" ... "al" and the closing brace) are not visible here.  Restore
   from the pristine file; do not compile this span as-is.  The visible
   entries map condition-code suffixes to their 4-bit encodings, with
   "cs"/"hs" and "cc"/"ul"/"lo" as synonym groups.  */
19828 static const struct asm_cond conds
[] =
19832 {"cs", 0x2}, {"hs", 0x2},
19833 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19847 #define UL_BARRIER(L,U,CODE,FEAT) \
19848 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19849 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19851 static struct asm_barrier_opt barrier_opt_names
[] =
19853 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER
),
19854 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER
),
19855 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8
),
19856 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER
),
19857 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER
),
19858 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER
),
19859 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER
),
19860 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8
),
19861 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER
),
19862 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER
),
19863 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER
),
19864 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER
),
19865 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8
),
19866 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER
),
19867 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER
),
19868 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8
)
/* NOTE(review): this region was damaged in extraction -- original source
   line numbers are fused into the text, the line breaks within each macro
   are arbitrary, and several continuation lines are missing (e.g. the
   "do_##te }" tails of ToC/ToU/toC/toU and the second line of the CL
   comment).  Restore from the pristine file before compiling.  The macros
   below build entries of the asm_opcode mnemonic table: each entry carries
   the mnemonic string, its operand list (OPS*), a tag saying where a
   condition suffix/infix may appear (OT_*), the ARM and Thumb opcode
   values, the ARM and Thumb architecture variants, and the ARM and Thumb
   encoding functions (do_*).  */
19873 /* Table of ARM-format instructions.	  */
19875 /* Macros for gluing together operand strings.  N.B. In all cases
19876 other than OPS0, the trailing OP_stop comes from default
19877 zero-initialization of the unspecified elements of the array.  */
19878 #define OPS0()		  { OP_stop, }
19879 #define OPS1(a)		  { OP_##a, }
19880 #define OPS2(a,b)	  { OP_##a,OP_##b, }
19881 #define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
19882 #define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
19883 #define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19884 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
19886 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19887 This is useful when mixing operands for ARM and THUMB, i.e. using the
19888 MIX_ARM_THUMB_OPERANDS macro.
19889 In order to use these macros, prefix the number of operands with _
19891 #define OPS_1(a)	   { a, }
19892 #define OPS_2(a,b)	   { a,b, }
19893 #define OPS_3(a,b,c)	   { a,b,c, }
19894 #define OPS_4(a,b,c,d)	   { a,b,c,d, }
19895 #define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
19896 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19898 /* These macros abstract out the exact format of the mnemonic table and
19899 save some repeated characters.  */
19901 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
19902 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19903 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19904 THUMB_VARIANT, do_##ae, do_##te }
19906 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19907 a T_MNEM_xyz enumerator.  */
19908 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19909 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19910 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19911 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19913 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19914 infix after the third character.  */
19915 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19916 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19917 THUMB_VARIANT, do_##ae, do_##te }
19918 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19919 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19920 THUMB_VARIANT, do_##ae, do_##te }
19921 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19922 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19923 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19924 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19925 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19926 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19927 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19928 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19930 /* Mnemonic that cannot be conditionalized.  The ARM condition-code
19931 field is still 0xE.  Many of the Thumb variants can be executed
19932 conditionally, so this is checked separately.  */
19933 #define TUE(mnem, op, top, nops, ops, ae, te) \
19934 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19935 THUMB_VARIANT, do_##ae, do_##te }
19937 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19938 Used by mnemonics that have very minimal differences in the encoding for
19939 ARM and Thumb variants and can be handled in a common function.  */
19940 #define TUEc(mnem, op, top, nops, ops, en) \
19941 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19942 THUMB_VARIANT, do_##en, do_##en }
19944 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19945 condition code field.  */
19946 #define TUF(mnem, op, top, nops, ops, ae, te) \
19947 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19948 THUMB_VARIANT, do_##ae, do_##te }
19950 /* ARM-only variants of all the above.  */
19951 #define CE(mnem, op, nops, ops, ae) \
19952 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19954 #define C3(mnem, op, nops, ops, ae) \
19955 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19957 /* Thumb-only variants of TCE and TUE.  */
/* NOTE(review): the continuation lines of ToC and ToU (their closing
   "do_##te }" tails) are missing from this extraction.  */
19958 #define ToC(mnem, top, nops, ops, te) \
19959 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
19962 #define ToU(mnem, top, nops, ops, te) \
19963 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
19966 /* T_MNEM_xyz enumerator variants of ToC.  */
19967 #define toC(mnem, top, nops, ops, te) \
19968 { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
19971 /* T_MNEM_xyz enumerator variants of ToU.  */
19972 #define toU(mnem, top, nops, ops, te) \
19973 { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
19976 /* Legacy mnemonics that always have conditional infix after the third
19978 #define CL(mnem, op, nops, ops, ae) \
19979 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19980 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19982 /* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  */
19983 #define cCE(mnem, op, nops, ops, ae) \
19984 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19986 /* Legacy coprocessor instructions where conditional infix and conditional
19987 suffix are ambiguous.  For consistency this includes all FPA instructions,
19988 not just the potentially ambiguous ones.  */
19989 #define cCL(mnem, op, nops, ops, ae) \
19990 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19991 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19993 /* Coprocessor, takes either a suffix or a position-3 infix
19994 (for an FPA corner case).  */
19995 #define C3E(mnem, op, nops, ops, ae) \
19996 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19997 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* Build one table entry whose mnemonic is m1 + condition-infix m2 + m3;
   the tag records where in the mnemonic the infix sits (after sizeof(m1)-1
   characters), or OT_odd_infix_unc when m2 is empty.  */
19999 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
20000 { m1 #m2 m3, OPS##nops ops, \
20001 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
20002 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* Expand to one entry per condition code (plus the bare form) for the
   mnemonic m1<cond>m2.  */
20004 #define CM(m1, m2, op, nops, ops, ae) \
20005 xCM_ (m1,   , m2, op, nops, ops, ae),	\
20006 xCM_ (m1, eq, m2, op, nops, ops, ae),	\
20007 xCM_ (m1, ne, m2, op, nops, ops, ae),	\
20008 xCM_ (m1, cs, m2, op, nops, ops, ae),	\
20009 xCM_ (m1, hs, m2, op, nops, ops, ae),	\
20010 xCM_ (m1, cc, m2, op, nops, ops, ae),	\
20011 xCM_ (m1, ul, m2, op, nops, ops, ae),	\
20012 xCM_ (m1, lo, m2, op, nops, ops, ae),	\
20013 xCM_ (m1, mi, m2, op, nops, ops, ae),	\
20014 xCM_ (m1, pl, m2, op, nops, ops, ae),	\
20015 xCM_ (m1, vs, m2, op, nops, ops, ae),	\
20016 xCM_ (m1, vc, m2, op, nops, ops, ae),	\
20017 xCM_ (m1, hi, m2, op, nops, ops, ae),	\
20018 xCM_ (m1, ls, m2, op, nops, ops, ae),	\
20019 xCM_ (m1, ge, m2, op, nops, ops, ae),	\
20020 xCM_ (m1, lt, m2, op, nops, ops, ae),	\
20021 xCM_ (m1, gt, m2, op, nops, ops, ae),	\
20022 xCM_ (m1, le, m2, op, nops, ops, ae),	\
20023 xCM_ (m1, al, m2, op, nops, ops, ae)
20025 #define UE(mnem, op, nops, ops, ae) \
20026 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
20028 #define UF(mnem, op, nops, ops, ae) \
20029 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
20031 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
20032 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
20033 use the same encoding function for each.  */
20034 #define NUF(mnem, op, nops, ops, enc) \
20035 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
20036 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
20038 /* Neon data processing, version which indirects through neon_enc_tab for
20039 the various overloaded versions of opcodes.  */
20040 #define nUF(mnem, op, nops, ops, enc) \
20041 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
20042 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
20044 /* Neon insn with conditional suffix for the ARM version, non-overloaded
20046 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
20047 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
20048 THUMB_VARIANT, do_##enc, do_##enc }
20050 #define NCE(mnem, op, nops, ops, enc) \
20051 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
20053 #define NCEF(mnem, op, nops, ops, enc) \
20054 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
20056 /* Neon insn with conditional suffix for the ARM version, overloaded types.  */
20057 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
20058 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
20059 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
20061 #define nCE(mnem, op, nops, ops, enc) \
20062 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
20064 #define nCEF(mnem, op, nops, ops, enc) \
20065 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
20069 static const struct asm_opcode insns
[] =
20071 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
20072 #define THUMB_VARIANT & arm_ext_v4t
20073 tCE("and", 0000000, _and
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20074 tC3("ands", 0100000, _ands
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20075 tCE("eor", 0200000, _eor
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20076 tC3("eors", 0300000, _eors
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20077 tCE("sub", 0400000, _sub
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20078 tC3("subs", 0500000, _subs
, 3, (RR
, oRR
, SH
), arit
, t_add_sub
),
20079 tCE("add", 0800000, _add
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20080 tC3("adds", 0900000, _adds
, 3, (RR
, oRR
, SHG
), arit
, t_add_sub
),
20081 tCE("adc", 0a00000
, _adc
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20082 tC3("adcs", 0b00000, _adcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20083 tCE("sbc", 0c00000
, _sbc
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20084 tC3("sbcs", 0d00000
, _sbcs
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20085 tCE("orr", 1800000, _orr
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20086 tC3("orrs", 1900000, _orrs
, 3, (RR
, oRR
, SH
), arit
, t_arit3c
),
20087 tCE("bic", 1c00000
, _bic
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20088 tC3("bics", 1d00000
, _bics
, 3, (RR
, oRR
, SH
), arit
, t_arit3
),
20090 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
20091 for setting PSR flag bits. They are obsolete in V6 and do not
20092 have Thumb equivalents. */
20093 tCE("tst", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20094 tC3w("tsts", 1100000, _tst
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20095 CL("tstp", 110f000
, 2, (RR
, SH
), cmp
),
20096 tCE("cmp", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20097 tC3w("cmps", 1500000, _cmp
, 2, (RR
, SH
), cmp
, t_mov_cmp
),
20098 CL("cmpp", 150f000
, 2, (RR
, SH
), cmp
),
20099 tCE("cmn", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20100 tC3w("cmns", 1700000, _cmn
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20101 CL("cmnp", 170f000
, 2, (RR
, SH
), cmp
),
20103 tCE("mov", 1a00000
, _mov
, 2, (RR
, SH
), mov
, t_mov_cmp
),
20104 tC3("movs", 1b00000
, _movs
, 2, (RR
, SHG
), mov
, t_mov_cmp
),
20105 tCE("mvn", 1e00000
, _mvn
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20106 tC3("mvns", 1f00000
, _mvns
, 2, (RR
, SH
), mov
, t_mvn_tst
),
20108 tCE("ldr", 4100000, _ldr
, 2, (RR
, ADDRGLDR
),ldst
, t_ldst
),
20109 tC3("ldrb", 4500000, _ldrb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20110 tCE("str", 4000000, _str
, _2
, (MIX_ARM_THUMB_OPERANDS (OP_RR
,
20112 OP_ADDRGLDR
),ldst
, t_ldst
),
20113 tC3("strb", 4400000, _strb
, 2, (RRnpc_npcsp
, ADDRGLDR
),ldst
, t_ldst
),
20115 tCE("stm", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20116 tC3("stmia", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20117 tC3("stmea", 8800000, _stmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20118 tCE("ldm", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20119 tC3("ldmia", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20120 tC3("ldmfd", 8900000, _ldmia
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20122 tCE("b", a000000
, _b
, 1, (EXPr
), branch
, t_branch
),
20123 TCE("bl", b000000
, f000f800
, 1, (EXPr
), bl
, t_branch23
),
20126 tCE("adr", 28f0000
, _adr
, 2, (RR
, EXP
), adr
, t_adr
),
20127 C3(adrl
, 28f0000
, 2, (RR
, EXP
), adrl
),
20128 tCE("nop", 1a00000
, _nop
, 1, (oI255c
), nop
, t_nop
),
20129 tCE("udf", 7f000f0
, _udf
, 1, (oIffffb
), bkpt
, t_udf
),
20131 /* Thumb-compatibility pseudo ops. */
20132 tCE("lsl", 1a00000
, _lsl
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20133 tC3("lsls", 1b00000
, _lsls
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20134 tCE("lsr", 1a00020
, _lsr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20135 tC3("lsrs", 1b00020
, _lsrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20136 tCE("asr", 1a00040
, _asr
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20137 tC3("asrs", 1b00040
, _asrs
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20138 tCE("ror", 1a00060
, _ror
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20139 tC3("rors", 1b00060
, _rors
, 3, (RR
, oRR
, SH
), shift
, t_shift
),
20140 tCE("neg", 2600000, _neg
, 2, (RR
, RR
), rd_rn
, t_neg
),
20141 tC3("negs", 2700000, _negs
, 2, (RR
, RR
), rd_rn
, t_neg
),
20142 tCE("push", 92d0000
, _push
, 1, (REGLST
), push_pop
, t_push_pop
),
20143 tCE("pop", 8bd0000
, _pop
, 1, (REGLST
), push_pop
, t_push_pop
),
20145 /* These may simplify to neg. */
20146 TCE("rsb", 0600000, ebc00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20147 TC3("rsbs", 0700000, ebd00000
, 3, (RR
, oRR
, SH
), arit
, t_rsb
),
20149 #undef THUMB_VARIANT
20150 #define THUMB_VARIANT & arm_ext_os
20152 TCE("swi", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20153 TCE("svc", f000000
, df00
, 1, (EXPi
), swi
, t_swi
),
20155 #undef THUMB_VARIANT
20156 #define THUMB_VARIANT & arm_ext_v6
20158 TCE("cpy", 1a00000
, 4600, 2, (RR
, RR
), rd_rm
, t_cpy
),
20160 /* V1 instructions with no Thumb analogue prior to V6T2. */
20161 #undef THUMB_VARIANT
20162 #define THUMB_VARIANT & arm_ext_v6t2
20164 TCE("teq", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20165 TC3w("teqs", 1300000, ea900f00
, 2, (RR
, SH
), cmp
, t_mvn_tst
),
20166 CL("teqp", 130f000
, 2, (RR
, SH
), cmp
),
20168 TC3("ldrt", 4300000, f8500e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20169 TC3("ldrbt", 4700000, f8100e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20170 TC3("strt", 4200000, f8400e00
, 2, (RR_npcsp
, ADDR
), ldstt
, t_ldstt
),
20171 TC3("strbt", 4600000, f8000e00
, 2, (RRnpc_npcsp
, ADDR
),ldstt
, t_ldstt
),
20173 TC3("stmdb", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20174 TC3("stmfd", 9000000, e9000000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20176 TC3("ldmdb", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20177 TC3("ldmea", 9100000, e9100000
, 2, (RRw
, REGLST
), ldmstm
, t_ldmstm
),
20179 /* V1 instructions with no Thumb analogue at all. */
20180 CE("rsc", 0e00000
, 3, (RR
, oRR
, SH
), arit
),
20181 C3(rscs
, 0f00000
, 3, (RR
, oRR
, SH
), arit
),
20183 C3(stmib
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20184 C3(stmfa
, 9800000, 2, (RRw
, REGLST
), ldmstm
),
20185 C3(stmda
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20186 C3(stmed
, 8000000, 2, (RRw
, REGLST
), ldmstm
),
20187 C3(ldmib
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20188 C3(ldmed
, 9900000, 2, (RRw
, REGLST
), ldmstm
),
20189 C3(ldmda
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20190 C3(ldmfa
, 8100000, 2, (RRw
, REGLST
), ldmstm
),
20193 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
20194 #undef THUMB_VARIANT
20195 #define THUMB_VARIANT & arm_ext_v4t
20197 tCE("mul", 0000090, _mul
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20198 tC3("muls", 0100090, _muls
, 3, (RRnpc
, RRnpc
, oRR
), mul
, t_mul
),
20200 #undef THUMB_VARIANT
20201 #define THUMB_VARIANT & arm_ext_v6t2
20203 TCE("mla", 0200090, fb000000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20204 C3(mlas
, 0300090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
),
20206 /* Generic coprocessor instructions. */
20207 TCE("cdp", e000000
, ee000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20208 TCE("ldc", c100000
, ec100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20209 TC3("ldcl", c500000
, ec500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20210 TCE("stc", c000000
, ec000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20211 TC3("stcl", c400000
, ec400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20212 TCE("mcr", e000010
, ee000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20213 TCE("mrc", e100010
, ee100010
, 6, (RCP
, I7b
, APSR_RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20216 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
20218 CE("swp", 1000090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20219 C3(swpb
, 1400090, 3, (RRnpc
, RRnpc
, RRnpcb
), rd_rm_rn
),
20222 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
20223 #undef THUMB_VARIANT
20224 #define THUMB_VARIANT & arm_ext_msr
20226 TCE("mrs", 1000000, f3e08000
, 2, (RRnpc
, rPSR
), mrs
, t_mrs
),
20227 TCE("msr", 120f000
, f3808000
, 2, (wPSR
, RR_EXi
), msr
, t_msr
),
20230 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
20231 #undef THUMB_VARIANT
20232 #define THUMB_VARIANT & arm_ext_v6t2
20234 TCE("smull", 0c00090
, fb800000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20235 CM("smull","s", 0d00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20236 TCE("umull", 0800090, fba00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20237 CM("umull","s", 0900090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20238 TCE("smlal", 0e00090
, fbc00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20239 CM("smlal","s", 0f00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20240 TCE("umlal", 0a00090
, fbe00000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
, t_mull
),
20241 CM("umlal","s", 0b00090, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mull
),
20244 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
20245 #undef THUMB_VARIANT
20246 #define THUMB_VARIANT & arm_ext_v4t
20248 tC3("ldrh", 01000b0
, _ldrh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20249 tC3("strh", 00000b0
, _strh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20250 tC3("ldrsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20251 tC3("ldrsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20252 tC3("ldsh", 01000f0
, _ldrsh
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20253 tC3("ldsb", 01000d0
, _ldrsb
, 2, (RRnpc_npcsp
, ADDRGLDRS
), ldstv4
, t_ldst
),
20256 #define ARM_VARIANT & arm_ext_v4t_5
20258 /* ARM Architecture 4T. */
20259 /* Note: bx (and blx) are required on V5, even if the processor does
20260 not support Thumb. */
20261 TCE("bx", 12fff10
, 4700, 1, (RR
), bx
, t_bx
),
20264 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
20265 #undef THUMB_VARIANT
20266 #define THUMB_VARIANT & arm_ext_v5t
20268 /* Note: blx has 2 variants; the .value coded here is for
20269 BLX(2). Only this variant has conditional execution. */
20270 TCE("blx", 12fff30
, 4780, 1, (RR_EXr
), blx
, t_blx
),
20271 TUE("bkpt", 1200070, be00
, 1, (oIffffb
), bkpt
, t_bkpt
),
20273 #undef THUMB_VARIANT
20274 #define THUMB_VARIANT & arm_ext_v6t2
20276 TCE("clz", 16f0f10
, fab0f080
, 2, (RRnpc
, RRnpc
), rd_rm
, t_clz
),
20277 TUF("ldc2", c100000
, fc100000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20278 TUF("ldc2l", c500000
, fc500000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20279 TUF("stc2", c000000
, fc000000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20280 TUF("stc2l", c400000
, fc400000
, 3, (RCP
, RCN
, ADDRGLDC
), lstc
, lstc
),
20281 TUF("cdp2", e000000
, fe000000
, 6, (RCP
, I15b
, RCN
, RCN
, RCN
, oI7b
), cdp
, cdp
),
20282 TUF("mcr2", e000010
, fe000010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20283 TUF("mrc2", e100010
, fe100010
, 6, (RCP
, I7b
, RR
, RCN
, RCN
, oI7b
), co_reg
, co_reg
),
20286 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
20287 #undef THUMB_VARIANT
20288 #define THUMB_VARIANT & arm_ext_v5exp
20290 TCE("smlabb", 1000080, fb100000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20291 TCE("smlatb", 10000a0
, fb100020
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20292 TCE("smlabt", 10000c0
, fb100010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20293 TCE("smlatt", 10000e0
, fb100030
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20295 TCE("smlawb", 1200080, fb300000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20296 TCE("smlawt", 12000c0
, fb300010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smla
, t_mla
),
20298 TCE("smlalbb", 1400080, fbc00080
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20299 TCE("smlaltb", 14000a0
, fbc000a0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20300 TCE("smlalbt", 14000c0
, fbc00090
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20301 TCE("smlaltt", 14000e0
, fbc000b0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), smlal
, t_mlal
),
20303 TCE("smulbb", 1600080, fb10f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20304 TCE("smultb", 16000a0
, fb10f020
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20305 TCE("smulbt", 16000c0
, fb10f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20306 TCE("smultt", 16000e0
, fb10f030
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20308 TCE("smulwb", 12000a0
, fb30f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20309 TCE("smulwt", 12000e0
, fb30f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20311 TCE("qadd", 1000050, fa80f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20312 TCE("qdadd", 1400050, fa80f090
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20313 TCE("qsub", 1200050, fa80f0a0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20314 TCE("qdsub", 1600050, fa80f0b0
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rm_rn
, t_simd2
),
20317 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
20318 #undef THUMB_VARIANT
20319 #define THUMB_VARIANT & arm_ext_v6t2
20321 TUF("pld", 450f000
, f810f000
, 1, (ADDR
), pld
, t_pld
),
20322 TC3("ldrd", 00000d0
, e8500000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, ADDRGLDRS
),
20324 TC3("strd", 00000f0
, e8400000
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
,
20325 ADDRGLDRS
), ldrd
, t_ldstd
),
20327 TCE("mcrr", c400000
, ec400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20328 TCE("mrrc", c500000
, ec500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20331 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
20333 TCE("bxj", 12fff20
, f3c08f00
, 1, (RR
), bxj
, t_bxj
),
20336 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
20337 #undef THUMB_VARIANT
20338 #define THUMB_VARIANT & arm_ext_v6
20340 TUF("cpsie", 1080000, b660
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20341 TUF("cpsid", 10c0000
, b670
, 2, (CPSF
, oI31b
), cpsi
, t_cpsi
),
20342 tCE("rev", 6bf0f30
, _rev
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20343 tCE("rev16", 6bf0fb0
, _rev16
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20344 tCE("revsh", 6ff0fb0
, _revsh
, 2, (RRnpc
, RRnpc
), rd_rm
, t_rev
),
20345 tCE("sxth", 6bf0070
, _sxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20346 tCE("uxth", 6ff0070
, _uxth
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20347 tCE("sxtb", 6af0070
, _sxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20348 tCE("uxtb", 6ef0070
, _uxtb
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20349 TUF("setend", 1010000, b650
, 1, (ENDI
), setend
, t_setend
),
20351 #undef THUMB_VARIANT
20352 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20354 TCE("ldrex", 1900f9f
, e8500f00
, 2, (RRnpc_npcsp
, ADDR
), ldrex
, t_ldrex
),
20355 TCE("strex", 1800f90
, e8400000
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20357 #undef THUMB_VARIANT
20358 #define THUMB_VARIANT & arm_ext_v6t2
20360 TUF("mcrr2", c400000
, fc400000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20361 TUF("mrrc2", c500000
, fc500000
, 5, (RCP
, I15b
, RRnpc
, RRnpc
, RCN
), co_reg2c
, co_reg2c
),
20363 TCE("ssat", 6a00010
, f3000000
, 4, (RRnpc
, I32
, RRnpc
, oSHllar
),ssat
, t_ssat
),
20364 TCE("usat", 6e00010
, f3800000
, 4, (RRnpc
, I31
, RRnpc
, oSHllar
),usat
, t_usat
),
20366 /* ARM V6 not included in V7M. */
20367 #undef THUMB_VARIANT
20368 #define THUMB_VARIANT & arm_ext_v6_notm
20369 TUF("rfeia", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20370 TUF("rfe", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20371 UF(rfeib
, 9900a00
, 1, (RRw
), rfe
),
20372 UF(rfeda
, 8100a00
, 1, (RRw
), rfe
),
20373 TUF("rfedb", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20374 TUF("rfefd", 8900a00
, e990c000
, 1, (RRw
), rfe
, rfe
),
20375 UF(rfefa
, 8100a00
, 1, (RRw
), rfe
),
20376 TUF("rfeea", 9100a00
, e810c000
, 1, (RRw
), rfe
, rfe
),
20377 UF(rfeed
, 9900a00
, 1, (RRw
), rfe
),
20378 TUF("srsia", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20379 TUF("srs", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20380 TUF("srsea", 8c00500
, e980c000
, 2, (oRRw
, I31w
), srs
, srs
),
20381 UF(srsib
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20382 UF(srsfa
, 9c00500
, 2, (oRRw
, I31w
), srs
),
20383 UF(srsda
, 8400500, 2, (oRRw
, I31w
), srs
),
20384 UF(srsed
, 8400500, 2, (oRRw
, I31w
), srs
),
20385 TUF("srsdb", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20386 TUF("srsfd", 9400500, e800c000
, 2, (oRRw
, I31w
), srs
, srs
),
20387 TUF("cps", 1020000, f3af8100
, 1, (I31b
), imm0
, t_cps
),
20389 /* ARM V6 not included in V7M (eg. integer SIMD). */
20390 #undef THUMB_VARIANT
20391 #define THUMB_VARIANT & arm_ext_v6_dsp
20392 TCE("pkhbt", 6800010, eac00000
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHll
), pkhbt
, t_pkhbt
),
20393 TCE("pkhtb", 6800050, eac00020
, 4, (RRnpc
, RRnpc
, RRnpc
, oSHar
), pkhtb
, t_pkhtb
),
20394 TCE("qadd16", 6200f10
, fa90f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20395 TCE("qadd8", 6200f90
, fa80f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20396 TCE("qasx", 6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20397 /* Old name for QASX. */
20398 TCE("qaddsubx",6200f30
, faa0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20399 TCE("qsax", 6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20400 /* Old name for QSAX. */
20401 TCE("qsubaddx",6200f50
, fae0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20402 TCE("qsub16", 6200f70
, fad0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20403 TCE("qsub8", 6200ff0
, fac0f010
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20404 TCE("sadd16", 6100f10
, fa90f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20405 TCE("sadd8", 6100f90
, fa80f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20406 TCE("sasx", 6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20407 /* Old name for SASX. */
20408 TCE("saddsubx",6100f30
, faa0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20409 TCE("shadd16", 6300f10
, fa90f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20410 TCE("shadd8", 6300f90
, fa80f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20411 TCE("shasx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20412 /* Old name for SHASX. */
20413 TCE("shaddsubx", 6300f30
, faa0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20414 TCE("shsax", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20415 /* Old name for SHSAX. */
20416 TCE("shsubaddx", 6300f50
, fae0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20417 TCE("shsub16", 6300f70
, fad0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20418 TCE("shsub8", 6300ff0
, fac0f020
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20419 TCE("ssax", 6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20420 /* Old name for SSAX. */
20421 TCE("ssubaddx",6100f50
, fae0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20422 TCE("ssub16", 6100f70
, fad0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20423 TCE("ssub8", 6100ff0
, fac0f000
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20424 TCE("uadd16", 6500f10
, fa90f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20425 TCE("uadd8", 6500f90
, fa80f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20426 TCE("uasx", 6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20427 /* Old name for UASX. */
20428 TCE("uaddsubx",6500f30
, faa0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20429 TCE("uhadd16", 6700f10
, fa90f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20430 TCE("uhadd8", 6700f90
, fa80f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20431 TCE("uhasx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20432 /* Old name for UHASX. */
20433 TCE("uhaddsubx", 6700f30
, faa0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20434 TCE("uhsax", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20435 /* Old name for UHSAX. */
20436 TCE("uhsubaddx", 6700f50
, fae0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20437 TCE("uhsub16", 6700f70
, fad0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20438 TCE("uhsub8", 6700ff0
, fac0f060
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20439 TCE("uqadd16", 6600f10
, fa90f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20440 TCE("uqadd8", 6600f90
, fa80f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20441 TCE("uqasx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20442 /* Old name for UQASX. */
20443 TCE("uqaddsubx", 6600f30
, faa0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20444 TCE("uqsax", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20445 /* Old name for UQSAX. */
20446 TCE("uqsubaddx", 6600f50
, fae0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20447 TCE("uqsub16", 6600f70
, fad0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20448 TCE("uqsub8", 6600ff0
, fac0f050
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20449 TCE("usub16", 6500f70
, fad0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20450 TCE("usax", 6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20451 /* Old name for USAX. */
20452 TCE("usubaddx",6500f50
, fae0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20453 TCE("usub8", 6500ff0
, fac0f040
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20454 TCE("sxtah", 6b00070
, fa00f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20455 TCE("sxtab16", 6800070, fa20f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20456 TCE("sxtab", 6a00070
, fa40f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20457 TCE("sxtb16", 68f0070
, fa2ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20458 TCE("uxtah", 6f00070
, fa10f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20459 TCE("uxtab16", 6c00070
, fa30f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20460 TCE("uxtab", 6e00070
, fa50f080
, 4, (RRnpc
, RRnpc
, RRnpc
, oROR
), sxtah
, t_sxtah
),
20461 TCE("uxtb16", 6cf0070
, fa3ff080
, 3, (RRnpc
, RRnpc
, oROR
), sxth
, t_sxth
),
20462 TCE("sel", 6800fb0
, faa0f080
, 3, (RRnpc
, RRnpc
, RRnpc
), rd_rn_rm
, t_simd
),
20463 TCE("smlad", 7000010, fb200000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20464 TCE("smladx", 7000030, fb200010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20465 TCE("smlald", 7400010, fbc000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20466 TCE("smlaldx", 7400030, fbc000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20467 TCE("smlsd", 7000050, fb400000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20468 TCE("smlsdx", 7000070, fb400010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20469 TCE("smlsld", 7400050, fbd000c0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20470 TCE("smlsldx", 7400070, fbd000d0
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
,t_mlal
),
20471 TCE("smmla", 7500010, fb500000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20472 TCE("smmlar", 7500030, fb500010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20473 TCE("smmls", 75000d0
, fb600000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20474 TCE("smmlsr", 75000f0
, fb600010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20475 TCE("smmul", 750f010
, fb50f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20476 TCE("smmulr", 750f030
, fb50f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20477 TCE("smuad", 700f010
, fb20f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20478 TCE("smuadx", 700f030
, fb20f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20479 TCE("smusd", 700f050
, fb40f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20480 TCE("smusdx", 700f070
, fb40f010
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20481 TCE("ssat16", 6a00f30
, f3200000
, 3, (RRnpc
, I16
, RRnpc
), ssat16
, t_ssat16
),
20482 TCE("umaal", 0400090, fbe00060
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smlal
, t_mlal
),
20483 TCE("usad8", 780f010
, fb70f000
, 3, (RRnpc
, RRnpc
, RRnpc
), smul
, t_simd
),
20484 TCE("usada8", 7800010, fb700000
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
),smla
, t_mla
),
20485 TCE("usat16", 6e00f30
, f3a00000
, 3, (RRnpc
, I15
, RRnpc
), usat16
, t_usat16
),
20488 #define ARM_VARIANT & arm_ext_v6k_v6t2
20489 #undef THUMB_VARIANT
20490 #define THUMB_VARIANT & arm_ext_v6k_v6t2
20492 tCE("yield", 320f001
, _yield
, 0, (), noargs
, t_hint
),
20493 tCE("wfe", 320f002
, _wfe
, 0, (), noargs
, t_hint
),
20494 tCE("wfi", 320f003
, _wfi
, 0, (), noargs
, t_hint
),
20495 tCE("sev", 320f004
, _sev
, 0, (), noargs
, t_hint
),
20497 #undef THUMB_VARIANT
20498 #define THUMB_VARIANT & arm_ext_v6_notm
20499 TCE("ldrexd", 1b00f9f
, e8d0007f
, 3, (RRnpc_npcsp
, oRRnpc_npcsp
, RRnpcb
),
20501 TCE("strexd", 1a00f90
, e8c00070
, 4, (RRnpc_npcsp
, RRnpc_npcsp
, oRRnpc_npcsp
,
20502 RRnpcb
), strexd
, t_strexd
),
20504 #undef THUMB_VARIANT
20505 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20506 TCE("ldrexb", 1d00f9f
, e8d00f4f
, 2, (RRnpc_npcsp
,RRnpcb
),
20508 TCE("ldrexh", 1f00f9f
, e8d00f5f
, 2, (RRnpc_npcsp
, RRnpcb
),
20510 TCE("strexb", 1c00f90
, e8c00f40
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20512 TCE("strexh", 1e00f90
, e8c00f50
, 3, (RRnpc_npcsp
, RRnpc_npcsp
, ADDR
),
20514 TUF("clrex", 57ff01f
, f3bf8f2f
, 0, (), noargs
, noargs
),
20517 #define ARM_VARIANT & arm_ext_sec
20518 #undef THUMB_VARIANT
20519 #define THUMB_VARIANT & arm_ext_sec
20521 TCE("smc", 1600070, f7f08000
, 1, (EXPi
), smc
, t_smc
),
20524 #define ARM_VARIANT & arm_ext_virt
20525 #undef THUMB_VARIANT
20526 #define THUMB_VARIANT & arm_ext_virt
20528 TCE("hvc", 1400070, f7e08000
, 1, (EXPi
), hvc
, t_hvc
),
20529 TCE("eret", 160006e
, f3de8f00
, 0, (), noargs
, noargs
),
20532 #define ARM_VARIANT & arm_ext_pan
20533 #undef THUMB_VARIANT
20534 #define THUMB_VARIANT & arm_ext_pan
20536 TUF("setpan", 1100000, b610
, 1, (I7
), setpan
, t_setpan
),
20539 #define ARM_VARIANT & arm_ext_v6t2
20540 #undef THUMB_VARIANT
20541 #define THUMB_VARIANT & arm_ext_v6t2
20543 TCE("bfc", 7c0001f
, f36f0000
, 3, (RRnpc
, I31
, I32
), bfc
, t_bfc
),
20544 TCE("bfi", 7c00010
, f3600000
, 4, (RRnpc
, RRnpc_I0
, I31
, I32
), bfi
, t_bfi
),
20545 TCE("sbfx", 7a00050
, f3400000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20546 TCE("ubfx", 7e00050
, f3c00000
, 4, (RR
, RR
, I31
, I32
), bfx
, t_bfx
),
20548 TCE("mls", 0600090, fb000010
, 4, (RRnpc
, RRnpc
, RRnpc
, RRnpc
), mlas
, t_mla
),
20549 TCE("rbit", 6ff0f30
, fa90f0a0
, 2, (RR
, RR
), rd_rm
, t_rbit
),
20551 TC3("ldrht", 03000b0
, f8300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20552 TC3("ldrsht", 03000f0
, f9300e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20553 TC3("ldrsbt", 03000d0
, f9100e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20554 TC3("strht", 02000b0
, f8200e00
, 2, (RRnpc_npcsp
, ADDR
), ldsttv4
, t_ldstt
),
20557 #define ARM_VARIANT & arm_ext_v3
20558 #undef THUMB_VARIANT
20559 #define THUMB_VARIANT & arm_ext_v6t2
20561 TUE("csdb", 320f014
, f3af8014
, 0, (), noargs
, t_csdb
),
20562 TUF("ssbb", 57ff040
, f3bf8f40
, 0, (), noargs
, t_csdb
),
20563 TUF("pssbb", 57ff044
, f3bf8f44
, 0, (), noargs
, t_csdb
),
20566 #define ARM_VARIANT & arm_ext_v6t2
20567 #undef THUMB_VARIANT
20568 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20569 TCE("movw", 3000000, f2400000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20570 TCE("movt", 3400000, f2c00000
, 2, (RRnpc
, HALF
), mov16
, t_mov16
),
20572 /* Thumb-only instructions. */
20574 #define ARM_VARIANT NULL
20575 TUE("cbnz", 0, b900
, 2, (RR
, EXP
), 0, t_cbz
),
20576 TUE("cbz", 0, b100
, 2, (RR
, EXP
), 0, t_cbz
),
20578 /* ARM does not really have an IT instruction, so always allow it.
20579 The opcode is copied from Thumb in order to allow warnings in
20580 -mimplicit-it=[never | arm] modes. */
20582 #define ARM_VARIANT & arm_ext_v1
20583 #undef THUMB_VARIANT
20584 #define THUMB_VARIANT & arm_ext_v6t2
20586 TUE("it", bf08
, bf08
, 1, (COND
), it
, t_it
),
20587 TUE("itt", bf0c
, bf0c
, 1, (COND
), it
, t_it
),
20588 TUE("ite", bf04
, bf04
, 1, (COND
), it
, t_it
),
20589 TUE("ittt", bf0e
, bf0e
, 1, (COND
), it
, t_it
),
20590 TUE("itet", bf06
, bf06
, 1, (COND
), it
, t_it
),
20591 TUE("itte", bf0a
, bf0a
, 1, (COND
), it
, t_it
),
20592 TUE("itee", bf02
, bf02
, 1, (COND
), it
, t_it
),
20593 TUE("itttt", bf0f
, bf0f
, 1, (COND
), it
, t_it
),
20594 TUE("itett", bf07
, bf07
, 1, (COND
), it
, t_it
),
20595 TUE("ittet", bf0b
, bf0b
, 1, (COND
), it
, t_it
),
20596 TUE("iteet", bf03
, bf03
, 1, (COND
), it
, t_it
),
20597 TUE("ittte", bf0d
, bf0d
, 1, (COND
), it
, t_it
),
20598 TUE("itete", bf05
, bf05
, 1, (COND
), it
, t_it
),
20599 TUE("ittee", bf09
, bf09
, 1, (COND
), it
, t_it
),
20600 TUE("iteee", bf01
, bf01
, 1, (COND
), it
, t_it
),
20601 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20602 TC3("rrx", 01a00060
, ea4f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20603 TC3("rrxs", 01b00060
, ea5f0030
, 2, (RR
, RR
), rd_rm
, t_rrx
),
20605 /* Thumb2 only instructions. */
20607 #define ARM_VARIANT NULL
20609 TCE("addw", 0, f2000000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20610 TCE("subw", 0, f2a00000
, 3, (RR
, RR
, EXPi
), 0, t_add_sub_w
),
20611 TCE("orn", 0, ea600000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20612 TCE("orns", 0, ea700000
, 3, (RR
, oRR
, SH
), 0, t_orn
),
20613 TCE("tbb", 0, e8d0f000
, 1, (TB
), 0, t_tb
),
20614 TCE("tbh", 0, e8d0f010
, 1, (TB
), 0, t_tb
),
20616 /* Hardware division instructions. */
20618 #define ARM_VARIANT & arm_ext_adiv
20619 #undef THUMB_VARIANT
20620 #define THUMB_VARIANT & arm_ext_div
20622 TCE("sdiv", 710f010
, fb90f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20623 TCE("udiv", 730f010
, fbb0f0f0
, 3, (RR
, oRR
, RR
), div
, t_div
),
20625 /* ARM V6M/V7 instructions. */
20627 #define ARM_VARIANT & arm_ext_barrier
20628 #undef THUMB_VARIANT
20629 #define THUMB_VARIANT & arm_ext_barrier
20631 TUF("dmb", 57ff050
, f3bf8f50
, 1, (oBARRIER_I15
), barrier
, barrier
),
20632 TUF("dsb", 57ff040
, f3bf8f40
, 1, (oBARRIER_I15
), barrier
, barrier
),
20633 TUF("isb", 57ff060
, f3bf8f60
, 1, (oBARRIER_I15
), barrier
, barrier
),
20635 /* ARM V7 instructions. */
20637 #define ARM_VARIANT & arm_ext_v7
20638 #undef THUMB_VARIANT
20639 #define THUMB_VARIANT & arm_ext_v7
20641 TUF("pli", 450f000
, f910f000
, 1, (ADDR
), pli
, t_pld
),
20642 TCE("dbg", 320f0f0
, f3af80f0
, 1, (I15
), dbg
, t_dbg
),
20645 #define ARM_VARIANT & arm_ext_mp
20646 #undef THUMB_VARIANT
20647 #define THUMB_VARIANT & arm_ext_mp
20649 TUF("pldw", 410f000
, f830f000
, 1, (ADDR
), pld
, t_pld
),
20651 /* AArchv8 instructions. */
20653 #define ARM_VARIANT & arm_ext_v8
20655 /* Instructions shared between armv8-a and armv8-m. */
20656 #undef THUMB_VARIANT
20657 #define THUMB_VARIANT & arm_ext_atomics
20659 TCE("lda", 1900c9f
, e8d00faf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20660 TCE("ldab", 1d00c9f
, e8d00f8f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20661 TCE("ldah", 1f00c9f
, e8d00f9f
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20662 TCE("stl", 180fc90
, e8c00faf
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20663 TCE("stlb", 1c0fc90
, e8c00f8f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20664 TCE("stlh", 1e0fc90
, e8c00f9f
, 2, (RRnpc
, RRnpcb
), rm_rn
, rd_rn
),
20665 TCE("ldaex", 1900e9f
, e8d00fef
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20666 TCE("ldaexb", 1d00e9f
, e8d00fcf
, 2, (RRnpc
,RRnpcb
), rd_rn
, rd_rn
),
20667 TCE("ldaexh", 1f00e9f
, e8d00fdf
, 2, (RRnpc
, RRnpcb
), rd_rn
, rd_rn
),
20668 TCE("stlex", 1800e90
, e8c00fe0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20670 TCE("stlexb", 1c00e90
, e8c00fc0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20672 TCE("stlexh", 1e00e90
, e8c00fd0
, 3, (RRnpc
, RRnpc
, RRnpcb
),
20674 #undef THUMB_VARIANT
20675 #define THUMB_VARIANT & arm_ext_v8
20677 tCE("sevl", 320f005
, _sevl
, 0, (), noargs
, t_hint
),
20678 TCE("ldaexd", 1b00e9f
, e8d000ff
, 3, (RRnpc
, oRRnpc
, RRnpcb
),
20680 TCE("stlexd", 1a00e90
, e8c000f0
, 4, (RRnpc
, RRnpc
, oRRnpc
, RRnpcb
),
20683 /* Defined in V8 but is in undefined encoding space for earlier
20684 architectures. However earlier architectures are required to treat
20685 this instruction as a semihosting trap as well. Hence while not explicitly
20686 defined as such, it is in fact correct to define the instruction for all
architectures. */
20688 #undef THUMB_VARIANT
20689 #define THUMB_VARIANT & arm_ext_v1
20691 #define ARM_VARIANT & arm_ext_v1
20692 TUE("hlt", 1000070, ba80
, 1, (oIffffb
), bkpt
, t_hlt
),
20694 /* ARMv8 T32 only. */
20696 #define ARM_VARIANT NULL
20697 TUF("dcps1", 0, f78f8001
, 0, (), noargs
, noargs
),
20698 TUF("dcps2", 0, f78f8002
, 0, (), noargs
, noargs
),
20699 TUF("dcps3", 0, f78f8003
, 0, (), noargs
, noargs
),
20701 /* FP for ARMv8. */
20703 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20704 #undef THUMB_VARIANT
20705 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20707 nUF(vseleq
, _vseleq
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20708 nUF(vselvs
, _vselvs
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20709 nUF(vselge
, _vselge
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20710 nUF(vselgt
, _vselgt
, 3, (RVSD
, RVSD
, RVSD
), vsel
),
20711 nUF(vmaxnm
, _vmaxnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20712 nUF(vminnm
, _vminnm
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), vmaxnm
),
20713 nUF(vcvta
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvta
),
20714 nUF(vcvtn
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtn
),
20715 nUF(vcvtp
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtp
),
20716 nUF(vcvtm
, _vcvta
, 2, (RNSDQ
, oRNSDQ
), neon_cvtm
),
20717 nCE(vrintr
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintr
),
20718 nCE(vrintz
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintz
),
20719 nCE(vrintx
, _vrintr
, 2, (RNSDQ
, oRNSDQ
), vrintx
),
20720 nUF(vrinta
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrinta
),
20721 nUF(vrintn
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintn
),
20722 nUF(vrintp
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintp
),
20723 nUF(vrintm
, _vrinta
, 2, (RNSDQ
, oRNSDQ
), vrintm
),
20725 /* Crypto v1 extensions. */
20727 #define ARM_VARIANT & fpu_crypto_ext_armv8
20728 #undef THUMB_VARIANT
20729 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20731 nUF(aese
, _aes
, 2, (RNQ
, RNQ
), aese
),
20732 nUF(aesd
, _aes
, 2, (RNQ
, RNQ
), aesd
),
20733 nUF(aesmc
, _aes
, 2, (RNQ
, RNQ
), aesmc
),
20734 nUF(aesimc
, _aes
, 2, (RNQ
, RNQ
), aesimc
),
20735 nUF(sha1c
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1c
),
20736 nUF(sha1p
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1p
),
20737 nUF(sha1m
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1m
),
20738 nUF(sha1su0
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha1su0
),
20739 nUF(sha256h
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h
),
20740 nUF(sha256h2
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256h2
),
20741 nUF(sha256su1
, _sha3op
, 3, (RNQ
, RNQ
, RNQ
), sha256su1
),
20742 nUF(sha1h
, _sha1h
, 2, (RNQ
, RNQ
), sha1h
),
20743 nUF(sha1su1
, _sha2op
, 2, (RNQ
, RNQ
), sha1su1
),
20744 nUF(sha256su0
, _sha2op
, 2, (RNQ
, RNQ
), sha256su0
),
20747 #define ARM_VARIANT & crc_ext_armv8
20748 #undef THUMB_VARIANT
20749 #define THUMB_VARIANT & crc_ext_armv8
20750 TUEc("crc32b", 1000040, fac0f080
, 3, (RR
, oRR
, RR
), crc32b
),
20751 TUEc("crc32h", 1200040, fac0f090
, 3, (RR
, oRR
, RR
), crc32h
),
20752 TUEc("crc32w", 1400040, fac0f0a0
, 3, (RR
, oRR
, RR
), crc32w
),
20753 TUEc("crc32cb",1000240, fad0f080
, 3, (RR
, oRR
, RR
), crc32cb
),
20754 TUEc("crc32ch",1200240, fad0f090
, 3, (RR
, oRR
, RR
), crc32ch
),
20755 TUEc("crc32cw",1400240, fad0f0a0
, 3, (RR
, oRR
, RR
), crc32cw
),
20757 /* ARMv8.2 RAS extension. */
20759 #define ARM_VARIANT & arm_ext_ras
20760 #undef THUMB_VARIANT
20761 #define THUMB_VARIANT & arm_ext_ras
20762 TUE ("esb", 320f010
, f3af8010
, 0, (), noargs
, noargs
),
20765 #define ARM_VARIANT & arm_ext_v8_3
20766 #undef THUMB_VARIANT
20767 #define THUMB_VARIANT & arm_ext_v8_3
20768 NCE (vjcvt
, eb90bc0
, 2, (RVS
, RVD
), vjcvt
),
20769 NUF (vcmla
, 0, 4, (RNDQ
, RNDQ
, RNDQ_RNSC
, EXPi
), vcmla
),
20770 NUF (vcadd
, 0, 4, (RNDQ
, RNDQ
, RNDQ
, EXPi
), vcadd
),
20773 #define ARM_VARIANT & fpu_neon_ext_dotprod
20774 #undef THUMB_VARIANT
20775 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20776 NUF (vsdot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_s
),
20777 NUF (vudot
, d00
, 3, (RNDQ
, RNDQ
, RNDQ_RNSC
), neon_dotproduct_u
),
20780 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20781 #undef THUMB_VARIANT
20782 #define THUMB_VARIANT NULL
20784 cCE("wfs", e200110
, 1, (RR
), rd
),
20785 cCE("rfs", e300110
, 1, (RR
), rd
),
20786 cCE("wfc", e400110
, 1, (RR
), rd
),
20787 cCE("rfc", e500110
, 1, (RR
), rd
),
20789 cCL("ldfs", c100100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20790 cCL("ldfd", c108100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20791 cCL("ldfe", c500100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20792 cCL("ldfp", c508100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20794 cCL("stfs", c000100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20795 cCL("stfd", c008100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20796 cCL("stfe", c400100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20797 cCL("stfp", c408100
, 2, (RF
, ADDRGLDC
), rd_cpaddr
),
20799 cCL("mvfs", e008100
, 2, (RF
, RF_IF
), rd_rm
),
20800 cCL("mvfsp", e008120
, 2, (RF
, RF_IF
), rd_rm
),
20801 cCL("mvfsm", e008140
, 2, (RF
, RF_IF
), rd_rm
),
20802 cCL("mvfsz", e008160
, 2, (RF
, RF_IF
), rd_rm
),
20803 cCL("mvfd", e008180
, 2, (RF
, RF_IF
), rd_rm
),
20804 cCL("mvfdp", e0081a0
, 2, (RF
, RF_IF
), rd_rm
),
20805 cCL("mvfdm", e0081c0
, 2, (RF
, RF_IF
), rd_rm
),
20806 cCL("mvfdz", e0081e0
, 2, (RF
, RF_IF
), rd_rm
),
20807 cCL("mvfe", e088100
, 2, (RF
, RF_IF
), rd_rm
),
20808 cCL("mvfep", e088120
, 2, (RF
, RF_IF
), rd_rm
),
20809 cCL("mvfem", e088140
, 2, (RF
, RF_IF
), rd_rm
),
20810 cCL("mvfez", e088160
, 2, (RF
, RF_IF
), rd_rm
),
20812 cCL("mnfs", e108100
, 2, (RF
, RF_IF
), rd_rm
),
20813 cCL("mnfsp", e108120
, 2, (RF
, RF_IF
), rd_rm
),
20814 cCL("mnfsm", e108140
, 2, (RF
, RF_IF
), rd_rm
),
20815 cCL("mnfsz", e108160
, 2, (RF
, RF_IF
), rd_rm
),
20816 cCL("mnfd", e108180
, 2, (RF
, RF_IF
), rd_rm
),
20817 cCL("mnfdp", e1081a0
, 2, (RF
, RF_IF
), rd_rm
),
20818 cCL("mnfdm", e1081c0
, 2, (RF
, RF_IF
), rd_rm
),
20819 cCL("mnfdz", e1081e0
, 2, (RF
, RF_IF
), rd_rm
),
20820 cCL("mnfe", e188100
, 2, (RF
, RF_IF
), rd_rm
),
20821 cCL("mnfep", e188120
, 2, (RF
, RF_IF
), rd_rm
),
20822 cCL("mnfem", e188140
, 2, (RF
, RF_IF
), rd_rm
),
20823 cCL("mnfez", e188160
, 2, (RF
, RF_IF
), rd_rm
),
20825 cCL("abss", e208100
, 2, (RF
, RF_IF
), rd_rm
),
20826 cCL("abssp", e208120
, 2, (RF
, RF_IF
), rd_rm
),
20827 cCL("abssm", e208140
, 2, (RF
, RF_IF
), rd_rm
),
20828 cCL("abssz", e208160
, 2, (RF
, RF_IF
), rd_rm
),
20829 cCL("absd", e208180
, 2, (RF
, RF_IF
), rd_rm
),
20830 cCL("absdp", e2081a0
, 2, (RF
, RF_IF
), rd_rm
),
20831 cCL("absdm", e2081c0
, 2, (RF
, RF_IF
), rd_rm
),
20832 cCL("absdz", e2081e0
, 2, (RF
, RF_IF
), rd_rm
),
20833 cCL("abse", e288100
, 2, (RF
, RF_IF
), rd_rm
),
20834 cCL("absep", e288120
, 2, (RF
, RF_IF
), rd_rm
),
20835 cCL("absem", e288140
, 2, (RF
, RF_IF
), rd_rm
),
20836 cCL("absez", e288160
, 2, (RF
, RF_IF
), rd_rm
),
20838 cCL("rnds", e308100
, 2, (RF
, RF_IF
), rd_rm
),
20839 cCL("rndsp", e308120
, 2, (RF
, RF_IF
), rd_rm
),
20840 cCL("rndsm", e308140
, 2, (RF
, RF_IF
), rd_rm
),
20841 cCL("rndsz", e308160
, 2, (RF
, RF_IF
), rd_rm
),
20842 cCL("rndd", e308180
, 2, (RF
, RF_IF
), rd_rm
),
20843 cCL("rnddp", e3081a0
, 2, (RF
, RF_IF
), rd_rm
),
20844 cCL("rnddm", e3081c0
, 2, (RF
, RF_IF
), rd_rm
),
20845 cCL("rnddz", e3081e0
, 2, (RF
, RF_IF
), rd_rm
),
20846 cCL("rnde", e388100
, 2, (RF
, RF_IF
), rd_rm
),
20847 cCL("rndep", e388120
, 2, (RF
, RF_IF
), rd_rm
),
20848 cCL("rndem", e388140
, 2, (RF
, RF_IF
), rd_rm
),
20849 cCL("rndez", e388160
, 2, (RF
, RF_IF
), rd_rm
),
20851 cCL("sqts", e408100
, 2, (RF
, RF_IF
), rd_rm
),
20852 cCL("sqtsp", e408120
, 2, (RF
, RF_IF
), rd_rm
),
20853 cCL("sqtsm", e408140
, 2, (RF
, RF_IF
), rd_rm
),
20854 cCL("sqtsz", e408160
, 2, (RF
, RF_IF
), rd_rm
),
20855 cCL("sqtd", e408180
, 2, (RF
, RF_IF
), rd_rm
),
20856 cCL("sqtdp", e4081a0
, 2, (RF
, RF_IF
), rd_rm
),
20857 cCL("sqtdm", e4081c0
, 2, (RF
, RF_IF
), rd_rm
),
20858 cCL("sqtdz", e4081e0
, 2, (RF
, RF_IF
), rd_rm
),
20859 cCL("sqte", e488100
, 2, (RF
, RF_IF
), rd_rm
),
20860 cCL("sqtep", e488120
, 2, (RF
, RF_IF
), rd_rm
),
20861 cCL("sqtem", e488140
, 2, (RF
, RF_IF
), rd_rm
),
20862 cCL("sqtez", e488160
, 2, (RF
, RF_IF
), rd_rm
),
20864 cCL("logs", e508100
, 2, (RF
, RF_IF
), rd_rm
),
20865 cCL("logsp", e508120
, 2, (RF
, RF_IF
), rd_rm
),
20866 cCL("logsm", e508140
, 2, (RF
, RF_IF
), rd_rm
),
20867 cCL("logsz", e508160
, 2, (RF
, RF_IF
), rd_rm
),
20868 cCL("logd", e508180
, 2, (RF
, RF_IF
), rd_rm
),
20869 cCL("logdp", e5081a0
, 2, (RF
, RF_IF
), rd_rm
),
20870 cCL("logdm", e5081c0
, 2, (RF
, RF_IF
), rd_rm
),
20871 cCL("logdz", e5081e0
, 2, (RF
, RF_IF
), rd_rm
),
20872 cCL("loge", e588100
, 2, (RF
, RF_IF
), rd_rm
),
20873 cCL("logep", e588120
, 2, (RF
, RF_IF
), rd_rm
),
20874 cCL("logem", e588140
, 2, (RF
, RF_IF
), rd_rm
),
20875 cCL("logez", e588160
, 2, (RF
, RF_IF
), rd_rm
),
20877 cCL("lgns", e608100
, 2, (RF
, RF_IF
), rd_rm
),
20878 cCL("lgnsp", e608120
, 2, (RF
, RF_IF
), rd_rm
),
20879 cCL("lgnsm", e608140
, 2, (RF
, RF_IF
), rd_rm
),
20880 cCL("lgnsz", e608160
, 2, (RF
, RF_IF
), rd_rm
),
20881 cCL("lgnd", e608180
, 2, (RF
, RF_IF
), rd_rm
),
20882 cCL("lgndp", e6081a0
, 2, (RF
, RF_IF
), rd_rm
),
20883 cCL("lgndm", e6081c0
, 2, (RF
, RF_IF
), rd_rm
),
20884 cCL("lgndz", e6081e0
, 2, (RF
, RF_IF
), rd_rm
),
20885 cCL("lgne", e688100
, 2, (RF
, RF_IF
), rd_rm
),
20886 cCL("lgnep", e688120
, 2, (RF
, RF_IF
), rd_rm
),
20887 cCL("lgnem", e688140
, 2, (RF
, RF_IF
), rd_rm
),
20888 cCL("lgnez", e688160
, 2, (RF
, RF_IF
), rd_rm
),
20890 cCL("exps", e708100
, 2, (RF
, RF_IF
), rd_rm
),
20891 cCL("expsp", e708120
, 2, (RF
, RF_IF
), rd_rm
),
20892 cCL("expsm", e708140
, 2, (RF
, RF_IF
), rd_rm
),
20893 cCL("expsz", e708160
, 2, (RF
, RF_IF
), rd_rm
),
20894 cCL("expd", e708180
, 2, (RF
, RF_IF
), rd_rm
),
20895 cCL("expdp", e7081a0
, 2, (RF
, RF_IF
), rd_rm
),
20896 cCL("expdm", e7081c0
, 2, (RF
, RF_IF
), rd_rm
),
20897 cCL("expdz", e7081e0
, 2, (RF
, RF_IF
), rd_rm
),
20898 cCL("expe", e788100
, 2, (RF
, RF_IF
), rd_rm
),
20899 cCL("expep", e788120
, 2, (RF
, RF_IF
), rd_rm
),
20900 cCL("expem", e788140
, 2, (RF
, RF_IF
), rd_rm
),
20901 cCL("expdz", e788160
, 2, (RF
, RF_IF
), rd_rm
),
20903 cCL("sins", e808100
, 2, (RF
, RF_IF
), rd_rm
),
20904 cCL("sinsp", e808120
, 2, (RF
, RF_IF
), rd_rm
),
20905 cCL("sinsm", e808140
, 2, (RF
, RF_IF
), rd_rm
),
20906 cCL("sinsz", e808160
, 2, (RF
, RF_IF
), rd_rm
),
20907 cCL("sind", e808180
, 2, (RF
, RF_IF
), rd_rm
),
20908 cCL("sindp", e8081a0
, 2, (RF
, RF_IF
), rd_rm
),
20909 cCL("sindm", e8081c0
, 2, (RF
, RF_IF
), rd_rm
),
20910 cCL("sindz", e8081e0
, 2, (RF
, RF_IF
), rd_rm
),
20911 cCL("sine", e888100
, 2, (RF
, RF_IF
), rd_rm
),
20912 cCL("sinep", e888120
, 2, (RF
, RF_IF
), rd_rm
),
20913 cCL("sinem", e888140
, 2, (RF
, RF_IF
), rd_rm
),
20914 cCL("sinez", e888160
, 2, (RF
, RF_IF
), rd_rm
),
20916 cCL("coss", e908100
, 2, (RF
, RF_IF
), rd_rm
),
20917 cCL("cossp", e908120
, 2, (RF
, RF_IF
), rd_rm
),
20918 cCL("cossm", e908140
, 2, (RF
, RF_IF
), rd_rm
),
20919 cCL("cossz", e908160
, 2, (RF
, RF_IF
), rd_rm
),
20920 cCL("cosd", e908180
, 2, (RF
, RF_IF
), rd_rm
),
20921 cCL("cosdp", e9081a0
, 2, (RF
, RF_IF
), rd_rm
),
20922 cCL("cosdm", e9081c0
, 2, (RF
, RF_IF
), rd_rm
),
20923 cCL("cosdz", e9081e0
, 2, (RF
, RF_IF
), rd_rm
),
20924 cCL("cose", e988100
, 2, (RF
, RF_IF
), rd_rm
),
20925 cCL("cosep", e988120
, 2, (RF
, RF_IF
), rd_rm
),
20926 cCL("cosem", e988140
, 2, (RF
, RF_IF
), rd_rm
),
20927 cCL("cosez", e988160
, 2, (RF
, RF_IF
), rd_rm
),
20929 cCL("tans", ea08100
, 2, (RF
, RF_IF
), rd_rm
),
20930 cCL("tansp", ea08120
, 2, (RF
, RF_IF
), rd_rm
),
20931 cCL("tansm", ea08140
, 2, (RF
, RF_IF
), rd_rm
),
20932 cCL("tansz", ea08160
, 2, (RF
, RF_IF
), rd_rm
),
20933 cCL("tand", ea08180
, 2, (RF
, RF_IF
), rd_rm
),
20934 cCL("tandp", ea081a0
, 2, (RF
, RF_IF
), rd_rm
),
20935 cCL("tandm", ea081c0
, 2, (RF
, RF_IF
), rd_rm
),
20936 cCL("tandz", ea081e0
, 2, (RF
, RF_IF
), rd_rm
),
20937 cCL("tane", ea88100
, 2, (RF
, RF_IF
), rd_rm
),
20938 cCL("tanep", ea88120
, 2, (RF
, RF_IF
), rd_rm
),
20939 cCL("tanem", ea88140
, 2, (RF
, RF_IF
), rd_rm
),
20940 cCL("tanez", ea88160
, 2, (RF
, RF_IF
), rd_rm
),
20942 cCL("asns", eb08100
, 2, (RF
, RF_IF
), rd_rm
),
20943 cCL("asnsp", eb08120
, 2, (RF
, RF_IF
), rd_rm
),
20944 cCL("asnsm", eb08140
, 2, (RF
, RF_IF
), rd_rm
),
20945 cCL("asnsz", eb08160
, 2, (RF
, RF_IF
), rd_rm
),
20946 cCL("asnd", eb08180
, 2, (RF
, RF_IF
), rd_rm
),
20947 cCL("asndp", eb081a0
, 2, (RF
, RF_IF
), rd_rm
),
20948 cCL("asndm", eb081c0
, 2, (RF
, RF_IF
), rd_rm
),
20949 cCL("asndz", eb081e0
, 2, (RF
, RF_IF
), rd_rm
),
20950 cCL("asne", eb88100
, 2, (RF
, RF_IF
), rd_rm
),
20951 cCL("asnep", eb88120
, 2, (RF
, RF_IF
), rd_rm
),
20952 cCL("asnem", eb88140
, 2, (RF
, RF_IF
), rd_rm
),
20953 cCL("asnez", eb88160
, 2, (RF
, RF_IF
), rd_rm
),
20955 cCL("acss", ec08100
, 2, (RF
, RF_IF
), rd_rm
),
20956 cCL("acssp", ec08120
, 2, (RF
, RF_IF
), rd_rm
),
20957 cCL("acssm", ec08140
, 2, (RF
, RF_IF
), rd_rm
),
20958 cCL("acssz", ec08160
, 2, (RF
, RF_IF
), rd_rm
),
20959 cCL("acsd", ec08180
, 2, (RF
, RF_IF
), rd_rm
),
20960 cCL("acsdp", ec081a0
, 2, (RF
, RF_IF
), rd_rm
),
20961 cCL("acsdm", ec081c0
, 2, (RF
, RF_IF
), rd_rm
),
20962 cCL("acsdz", ec081e0
, 2, (RF
, RF_IF
), rd_rm
),
20963 cCL("acse", ec88100
, 2, (RF
, RF_IF
), rd_rm
),
20964 cCL("acsep", ec88120
, 2, (RF
, RF_IF
), rd_rm
),
20965 cCL("acsem", ec88140
, 2, (RF
, RF_IF
), rd_rm
),
20966 cCL("acsez", ec88160
, 2, (RF
, RF_IF
), rd_rm
),
20968 cCL("atns", ed08100
, 2, (RF
, RF_IF
), rd_rm
),
20969 cCL("atnsp", ed08120
, 2, (RF
, RF_IF
), rd_rm
),
20970 cCL("atnsm", ed08140
, 2, (RF
, RF_IF
), rd_rm
),
20971 cCL("atnsz", ed08160
, 2, (RF
, RF_IF
), rd_rm
),
20972 cCL("atnd", ed08180
, 2, (RF
, RF_IF
), rd_rm
),
20973 cCL("atndp", ed081a0
, 2, (RF
, RF_IF
), rd_rm
),
20974 cCL("atndm", ed081c0
, 2, (RF
, RF_IF
), rd_rm
),
20975 cCL("atndz", ed081e0
, 2, (RF
, RF_IF
), rd_rm
),
20976 cCL("atne", ed88100
, 2, (RF
, RF_IF
), rd_rm
),
20977 cCL("atnep", ed88120
, 2, (RF
, RF_IF
), rd_rm
),
20978 cCL("atnem", ed88140
, 2, (RF
, RF_IF
), rd_rm
),
20979 cCL("atnez", ed88160
, 2, (RF
, RF_IF
), rd_rm
),
20981 cCL("urds", ee08100
, 2, (RF
, RF_IF
), rd_rm
),
20982 cCL("urdsp", ee08120
, 2, (RF
, RF_IF
), rd_rm
),
20983 cCL("urdsm", ee08140
, 2, (RF
, RF_IF
), rd_rm
),
20984 cCL("urdsz", ee08160
, 2, (RF
, RF_IF
), rd_rm
),
20985 cCL("urdd", ee08180
, 2, (RF
, RF_IF
), rd_rm
),
20986 cCL("urddp", ee081a0
, 2, (RF
, RF_IF
), rd_rm
),
20987 cCL("urddm", ee081c0
, 2, (RF
, RF_IF
), rd_rm
),
20988 cCL("urddz", ee081e0
, 2, (RF
, RF_IF
), rd_rm
),
20989 cCL("urde", ee88100
, 2, (RF
, RF_IF
), rd_rm
),
20990 cCL("urdep", ee88120
, 2, (RF
, RF_IF
), rd_rm
),
20991 cCL("urdem", ee88140
, 2, (RF
, RF_IF
), rd_rm
),
20992 cCL("urdez", ee88160
, 2, (RF
, RF_IF
), rd_rm
),
20994 cCL("nrms", ef08100
, 2, (RF
, RF_IF
), rd_rm
),
20995 cCL("nrmsp", ef08120
, 2, (RF
, RF_IF
), rd_rm
),
20996 cCL("nrmsm", ef08140
, 2, (RF
, RF_IF
), rd_rm
),
20997 cCL("nrmsz", ef08160
, 2, (RF
, RF_IF
), rd_rm
),
20998 cCL("nrmd", ef08180
, 2, (RF
, RF_IF
), rd_rm
),
20999 cCL("nrmdp", ef081a0
, 2, (RF
, RF_IF
), rd_rm
),
21000 cCL("nrmdm", ef081c0
, 2, (RF
, RF_IF
), rd_rm
),
21001 cCL("nrmdz", ef081e0
, 2, (RF
, RF_IF
), rd_rm
),
21002 cCL("nrme", ef88100
, 2, (RF
, RF_IF
), rd_rm
),
21003 cCL("nrmep", ef88120
, 2, (RF
, RF_IF
), rd_rm
),
21004 cCL("nrmem", ef88140
, 2, (RF
, RF_IF
), rd_rm
),
21005 cCL("nrmez", ef88160
, 2, (RF
, RF_IF
), rd_rm
),
21007 cCL("adfs", e000100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21008 cCL("adfsp", e000120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21009 cCL("adfsm", e000140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21010 cCL("adfsz", e000160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21011 cCL("adfd", e000180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21012 cCL("adfdp", e0001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21013 cCL("adfdm", e0001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21014 cCL("adfdz", e0001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21015 cCL("adfe", e080100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21016 cCL("adfep", e080120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21017 cCL("adfem", e080140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21018 cCL("adfez", e080160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21020 cCL("sufs", e200100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21021 cCL("sufsp", e200120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21022 cCL("sufsm", e200140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21023 cCL("sufsz", e200160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21024 cCL("sufd", e200180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21025 cCL("sufdp", e2001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21026 cCL("sufdm", e2001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21027 cCL("sufdz", e2001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21028 cCL("sufe", e280100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21029 cCL("sufep", e280120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21030 cCL("sufem", e280140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21031 cCL("sufez", e280160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21033 cCL("rsfs", e300100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21034 cCL("rsfsp", e300120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21035 cCL("rsfsm", e300140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21036 cCL("rsfsz", e300160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21037 cCL("rsfd", e300180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21038 cCL("rsfdp", e3001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21039 cCL("rsfdm", e3001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21040 cCL("rsfdz", e3001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21041 cCL("rsfe", e380100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21042 cCL("rsfep", e380120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21043 cCL("rsfem", e380140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21044 cCL("rsfez", e380160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21046 cCL("mufs", e100100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21047 cCL("mufsp", e100120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21048 cCL("mufsm", e100140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21049 cCL("mufsz", e100160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21050 cCL("mufd", e100180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21051 cCL("mufdp", e1001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21052 cCL("mufdm", e1001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21053 cCL("mufdz", e1001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21054 cCL("mufe", e180100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21055 cCL("mufep", e180120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21056 cCL("mufem", e180140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21057 cCL("mufez", e180160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21059 cCL("dvfs", e400100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21060 cCL("dvfsp", e400120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21061 cCL("dvfsm", e400140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21062 cCL("dvfsz", e400160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21063 cCL("dvfd", e400180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21064 cCL("dvfdp", e4001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21065 cCL("dvfdm", e4001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21066 cCL("dvfdz", e4001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21067 cCL("dvfe", e480100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21068 cCL("dvfep", e480120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21069 cCL("dvfem", e480140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21070 cCL("dvfez", e480160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21072 cCL("rdfs", e500100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21073 cCL("rdfsp", e500120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21074 cCL("rdfsm", e500140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21075 cCL("rdfsz", e500160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21076 cCL("rdfd", e500180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21077 cCL("rdfdp", e5001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21078 cCL("rdfdm", e5001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21079 cCL("rdfdz", e5001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21080 cCL("rdfe", e580100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21081 cCL("rdfep", e580120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21082 cCL("rdfem", e580140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21083 cCL("rdfez", e580160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21085 cCL("pows", e600100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21086 cCL("powsp", e600120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21087 cCL("powsm", e600140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21088 cCL("powsz", e600160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21089 cCL("powd", e600180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21090 cCL("powdp", e6001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21091 cCL("powdm", e6001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21092 cCL("powdz", e6001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21093 cCL("powe", e680100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21094 cCL("powep", e680120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21095 cCL("powem", e680140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21096 cCL("powez", e680160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21098 cCL("rpws", e700100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21099 cCL("rpwsp", e700120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21100 cCL("rpwsm", e700140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21101 cCL("rpwsz", e700160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21102 cCL("rpwd", e700180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21103 cCL("rpwdp", e7001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21104 cCL("rpwdm", e7001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21105 cCL("rpwdz", e7001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21106 cCL("rpwe", e780100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21107 cCL("rpwep", e780120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21108 cCL("rpwem", e780140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21109 cCL("rpwez", e780160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21111 cCL("rmfs", e800100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21112 cCL("rmfsp", e800120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21113 cCL("rmfsm", e800140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21114 cCL("rmfsz", e800160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21115 cCL("rmfd", e800180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21116 cCL("rmfdp", e8001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21117 cCL("rmfdm", e8001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21118 cCL("rmfdz", e8001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21119 cCL("rmfe", e880100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21120 cCL("rmfep", e880120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21121 cCL("rmfem", e880140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21122 cCL("rmfez", e880160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21124 cCL("fmls", e900100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21125 cCL("fmlsp", e900120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21126 cCL("fmlsm", e900140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21127 cCL("fmlsz", e900160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21128 cCL("fmld", e900180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21129 cCL("fmldp", e9001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21130 cCL("fmldm", e9001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21131 cCL("fmldz", e9001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21132 cCL("fmle", e980100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21133 cCL("fmlep", e980120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21134 cCL("fmlem", e980140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21135 cCL("fmlez", e980160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21137 cCL("fdvs", ea00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21138 cCL("fdvsp", ea00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21139 cCL("fdvsm", ea00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21140 cCL("fdvsz", ea00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21141 cCL("fdvd", ea00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21142 cCL("fdvdp", ea001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21143 cCL("fdvdm", ea001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21144 cCL("fdvdz", ea001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21145 cCL("fdve", ea80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21146 cCL("fdvep", ea80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21147 cCL("fdvem", ea80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21148 cCL("fdvez", ea80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21150 cCL("frds", eb00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21151 cCL("frdsp", eb00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21152 cCL("frdsm", eb00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21153 cCL("frdsz", eb00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21154 cCL("frdd", eb00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21155 cCL("frddp", eb001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21156 cCL("frddm", eb001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21157 cCL("frddz", eb001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21158 cCL("frde", eb80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21159 cCL("frdep", eb80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21160 cCL("frdem", eb80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21161 cCL("frdez", eb80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21163 cCL("pols", ec00100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21164 cCL("polsp", ec00120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21165 cCL("polsm", ec00140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21166 cCL("polsz", ec00160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21167 cCL("pold", ec00180
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21168 cCL("poldp", ec001a0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21169 cCL("poldm", ec001c0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21170 cCL("poldz", ec001e0
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21171 cCL("pole", ec80100
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21172 cCL("polep", ec80120
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21173 cCL("polem", ec80140
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21174 cCL("polez", ec80160
, 3, (RF
, RF
, RF_IF
), rd_rn_rm
),
21176 cCE("cmf", e90f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21177 C3E("cmfe", ed0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21178 cCE("cnf", eb0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21179 C3E("cnfe", ef0f110
, 2, (RF
, RF_IF
), fpa_cmp
),
21181 cCL("flts", e000110
, 2, (RF
, RR
), rn_rd
),
21182 cCL("fltsp", e000130
, 2, (RF
, RR
), rn_rd
),
21183 cCL("fltsm", e000150
, 2, (RF
, RR
), rn_rd
),
21184 cCL("fltsz", e000170
, 2, (RF
, RR
), rn_rd
),
21185 cCL("fltd", e000190
, 2, (RF
, RR
), rn_rd
),
21186 cCL("fltdp", e0001b0
, 2, (RF
, RR
), rn_rd
),
21187 cCL("fltdm", e0001d0
, 2, (RF
, RR
), rn_rd
),
21188 cCL("fltdz", e0001f0
, 2, (RF
, RR
), rn_rd
),
21189 cCL("flte", e080110
, 2, (RF
, RR
), rn_rd
),
21190 cCL("fltep", e080130
, 2, (RF
, RR
), rn_rd
),
21191 cCL("fltem", e080150
, 2, (RF
, RR
), rn_rd
),
21192 cCL("fltez", e080170
, 2, (RF
, RR
), rn_rd
),
21194 /* The implementation of the FIX instruction is broken on some
21195 assemblers, in that it accepts a precision specifier as well as a
21196 rounding specifier, despite the fact that this is meaningless.
21197 To be more compatible, we accept it as well, though of course it
21198 does not set any bits. */
21199 cCE("fix", e100110
, 2, (RR
, RF
), rd_rm
),
21200 cCL("fixp", e100130
, 2, (RR
, RF
), rd_rm
),
21201 cCL("fixm", e100150
, 2, (RR
, RF
), rd_rm
),
21202 cCL("fixz", e100170
, 2, (RR
, RF
), rd_rm
),
21203 cCL("fixsp", e100130
, 2, (RR
, RF
), rd_rm
),
21204 cCL("fixsm", e100150
, 2, (RR
, RF
), rd_rm
),
21205 cCL("fixsz", e100170
, 2, (RR
, RF
), rd_rm
),
21206 cCL("fixdp", e100130
, 2, (RR
, RF
), rd_rm
),
21207 cCL("fixdm", e100150
, 2, (RR
, RF
), rd_rm
),
21208 cCL("fixdz", e100170
, 2, (RR
, RF
), rd_rm
),
21209 cCL("fixep", e100130
, 2, (RR
, RF
), rd_rm
),
21210 cCL("fixem", e100150
, 2, (RR
, RF
), rd_rm
),
21211 cCL("fixez", e100170
, 2, (RR
, RF
), rd_rm
),
21213 /* Instructions that were new with the real FPA, call them V2. */
21215 #define ARM_VARIANT & fpu_fpa_ext_v2
21217 cCE("lfm", c100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21218 cCL("lfmfd", c900200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21219 cCL("lfmea", d100200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21220 cCE("sfm", c000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21221 cCL("sfmfd", d000200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21222 cCL("sfmea", c800200
, 3, (RF
, I4b
, ADDR
), fpa_ldmstm
),
21225 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
21227 /* Moves and type conversions. */
21228 cCE("fcpys", eb00a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21229 cCE("fmrs", e100a10
, 2, (RR
, RVS
), vfp_reg_from_sp
),
21230 cCE("fmsr", e000a10
, 2, (RVS
, RR
), vfp_sp_from_reg
),
21231 cCE("fmstat", ef1fa10
, 0, (), noargs
),
21232 cCE("vmrs", ef00a10
, 2, (APSR_RR
, RVC
), vmrs
),
21233 cCE("vmsr", ee00a10
, 2, (RVC
, RR
), vmsr
),
21234 cCE("fsitos", eb80ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21235 cCE("fuitos", eb80a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21236 cCE("ftosis", ebd0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21237 cCE("ftosizs", ebd0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21238 cCE("ftouis", ebc0a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21239 cCE("ftouizs", ebc0ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21240 cCE("fmrx", ef00a10
, 2, (RR
, RVC
), rd_rn
),
21241 cCE("fmxr", ee00a10
, 2, (RVC
, RR
), rn_rd
),
21243 /* Memory operations. */
21244 cCE("flds", d100a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21245 cCE("fsts", d000a00
, 2, (RVS
, ADDRGLDC
), vfp_sp_ldst
),
21246 cCE("fldmias", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21247 cCE("fldmfds", c900a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21248 cCE("fldmdbs", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21249 cCE("fldmeas", d300a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21250 cCE("fldmiax", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21251 cCE("fldmfdx", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21252 cCE("fldmdbx", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21253 cCE("fldmeax", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21254 cCE("fstmias", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21255 cCE("fstmeas", c800a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmia
),
21256 cCE("fstmdbs", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21257 cCE("fstmfds", d200a00
, 2, (RRnpctw
, VRSLST
), vfp_sp_ldstmdb
),
21258 cCE("fstmiax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21259 cCE("fstmeax", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmia
),
21260 cCE("fstmdbx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21261 cCE("fstmfdx", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_xp_ldstmdb
),
21263 /* Monadic operations. */
21264 cCE("fabss", eb00ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21265 cCE("fnegs", eb10a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21266 cCE("fsqrts", eb10ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21268 /* Dyadic operations. */
21269 cCE("fadds", e300a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21270 cCE("fsubs", e300a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21271 cCE("fmuls", e200a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21272 cCE("fdivs", e800a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21273 cCE("fmacs", e000a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21274 cCE("fmscs", e100a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21275 cCE("fnmuls", e200a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21276 cCE("fnmacs", e000a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21277 cCE("fnmscs", e100a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21280 cCE("fcmps", eb40a40
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21281 cCE("fcmpzs", eb50a40
, 1, (RVS
), vfp_sp_compare_z
),
21282 cCE("fcmpes", eb40ac0
, 2, (RVS
, RVS
), vfp_sp_monadic
),
21283 cCE("fcmpezs", eb50ac0
, 1, (RVS
), vfp_sp_compare_z
),
21285 /* Double precision load/store are still present on single precision
21286 implementations. */
21287 cCE("fldd", d100b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21288 cCE("fstd", d000b00
, 2, (RVD
, ADDRGLDC
), vfp_dp_ldst
),
21289 cCE("fldmiad", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21290 cCE("fldmfdd", c900b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21291 cCE("fldmdbd", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21292 cCE("fldmead", d300b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21293 cCE("fstmiad", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21294 cCE("fstmead", c800b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmia
),
21295 cCE("fstmdbd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21296 cCE("fstmfdd", d200b00
, 2, (RRnpctw
, VRDLST
), vfp_dp_ldstmdb
),
21299 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
21301 /* Moves and type conversions. */
21302 cCE("fcpyd", eb00b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21303 cCE("fcvtds", eb70ac0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21304 cCE("fcvtsd", eb70bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21305 cCE("fmdhr", e200b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21306 cCE("fmdlr", e000b10
, 2, (RVD
, RR
), vfp_dp_rn_rd
),
21307 cCE("fmrdh", e300b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21308 cCE("fmrdl", e100b10
, 2, (RR
, RVD
), vfp_dp_rd_rn
),
21309 cCE("fsitod", eb80bc0
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21310 cCE("fuitod", eb80b40
, 2, (RVD
, RVS
), vfp_dp_sp_cvt
),
21311 cCE("ftosid", ebd0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21312 cCE("ftosizd", ebd0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21313 cCE("ftouid", ebc0b40
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21314 cCE("ftouizd", ebc0bc0
, 2, (RVS
, RVD
), vfp_sp_dp_cvt
),
21316 /* Monadic operations. */
21317 cCE("fabsd", eb00bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21318 cCE("fnegd", eb10b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21319 cCE("fsqrtd", eb10bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21321 /* Dyadic operations. */
21322 cCE("faddd", e300b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21323 cCE("fsubd", e300b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21324 cCE("fmuld", e200b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21325 cCE("fdivd", e800b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21326 cCE("fmacd", e000b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21327 cCE("fmscd", e100b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21328 cCE("fnmuld", e200b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21329 cCE("fnmacd", e000b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21330 cCE("fnmscd", e100b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21333 cCE("fcmpd", eb40b40
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21334 cCE("fcmpzd", eb50b40
, 1, (RVD
), vfp_dp_rd
),
21335 cCE("fcmped", eb40bc0
, 2, (RVD
, RVD
), vfp_dp_rd_rm
),
21336 cCE("fcmpezd", eb50bc0
, 1, (RVD
), vfp_dp_rd
),
21339 #define ARM_VARIANT & fpu_vfp_ext_v2
21341 cCE("fmsrr", c400a10
, 3, (VRSLST
, RR
, RR
), vfp_sp2_from_reg2
),
21342 cCE("fmrrs", c500a10
, 3, (RR
, RR
, VRSLST
), vfp_reg2_from_sp2
),
21343 cCE("fmdrr", c400b10
, 3, (RVD
, RR
, RR
), vfp_dp_rm_rd_rn
),
21344 cCE("fmrrd", c500b10
, 3, (RR
, RR
, RVD
), vfp_dp_rd_rn_rm
),
21346 /* Instructions which may belong to either the Neon or VFP instruction sets.
21347 Individual encoder functions perform additional architecture checks. */
21349 #define ARM_VARIANT & fpu_vfp_ext_v1xd
21350 #undef THUMB_VARIANT
21351 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
21353 /* These mnemonics are unique to VFP. */
21354 NCE(vsqrt
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_sqrt
),
21355 NCE(vdiv
, 0, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_div
),
21356 nCE(vnmul
, _vnmul
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21357 nCE(vnmla
, _vnmla
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21358 nCE(vnmls
, _vnmls
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21359 nCE(vcmp
, _vcmp
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21360 nCE(vcmpe
, _vcmpe
, 2, (RVSD
, RSVD_FI0
), vfp_nsyn_cmp
),
21361 NCE(vpush
, 0, 1, (VRSDLST
), vfp_nsyn_push
),
21362 NCE(vpop
, 0, 1, (VRSDLST
), vfp_nsyn_pop
),
21363 NCE(vcvtz
, 0, 2, (RVSD
, RVSD
), vfp_nsyn_cvtz
),
21365 /* Mnemonics shared by Neon and VFP. */
21366 nCEF(vmul
, _vmul
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mul
),
21367 nCEF(vmla
, _vmla
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21368 nCEF(vmls
, _vmls
, 3, (RNSDQ
, oRNSDQ
, RNSDQ_RNSC
), neon_mac_maybe_scalar
),
21370 nCEF(vadd
, _vadd
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21371 nCEF(vsub
, _vsub
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_addsub_if_i
),
21373 NCEF(vabs
, 1b10300
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21374 NCEF(vneg
, 1b10380
, 2, (RNSDQ
, RNSDQ
), neon_abs_neg
),
21376 NCE(vldm
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21377 NCE(vldmia
, c900b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21378 NCE(vldmdb
, d100b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21379 NCE(vstm
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21380 NCE(vstmia
, c800b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21381 NCE(vstmdb
, d000b00
, 2, (RRnpctw
, VRSDLST
), neon_ldm_stm
),
21383 nCEF(vcvt
, _vcvt
, 3, (RNSDQ
, RNSDQ
, oI32z
), neon_cvt
),
21384 nCEF(vcvtr
, _vcvt
, 2, (RNSDQ
, RNSDQ
), neon_cvtr
),
21385 NCEF(vcvtb
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtb
),
21386 NCEF(vcvtt
, eb20a40
, 2, (RVSD
, RVSD
), neon_cvtt
),
21389 /* NOTE: All VMOV encoding is special-cased! */
21390 NCE(vmov
, 0, 1, (VMOV
), neon_mov
),
21391 NCE(vmovq
, 0, 1, (VMOV
), neon_mov
),
21393 #undef THUMB_VARIANT
21394 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
21395 by different feature bits. Since we are setting the Thumb guard, we can
21396 require Thumb-1 which makes it a nop guard and set the right feature bit in
21397 do_vldr_vstr (). */
21398 #define THUMB_VARIANT & arm_ext_v4t
21399 NCE(vldr
, d100b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
21400 NCE(vstr
, d000b00
, 2, (VLDR
, ADDRGLDC
), vldr_vstr
),
21403 #define ARM_VARIANT & arm_ext_fp16
21404 #undef THUMB_VARIANT
21405 #define THUMB_VARIANT & arm_ext_fp16
21406 /* New instructions added from v8.2, allowing the extraction and insertion of
21407 the upper 16 bits of a 32-bit vector register. */
21408 NCE (vmovx
, eb00a40
, 2, (RVS
, RVS
), neon_movhf
),
21409 NCE (vins
, eb00ac0
, 2, (RVS
, RVS
), neon_movhf
),
21411 /* New backported fma/fms instructions optional in v8.2. */
21412 NCE (vfmal
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmal
),
21413 NCE (vfmsl
, 810, 3, (RNDQ
, RNSD
, RNSD_RNSC
), neon_vfmsl
),
21415 #undef THUMB_VARIANT
21416 #define THUMB_VARIANT & fpu_neon_ext_v1
21418 #define ARM_VARIANT & fpu_neon_ext_v1
21420 /* Data processing with three registers of the same length. */
21421 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
21422 NUF(vaba
, 0000710, 3, (RNDQ
, RNDQ
, RNDQ
), neon_dyadic_i_su
),
21423 NUF(vabaq
, 0000710, 3, (RNQ
, RNQ
, RNQ
), neon_dyadic_i_su
),
21424 NUF(vhadd
, 0000000, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21425 NUF(vhaddq
, 0000000, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21426 NUF(vrhadd
, 0000100, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21427 NUF(vrhaddq
, 0000100, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21428 NUF(vhsub
, 0000200, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i_su
),
21429 NUF(vhsubq
, 0000200, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i_su
),
21430 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
21431 NUF(vqadd
, 0000010, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21432 NUF(vqaddq
, 0000010, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21433 NUF(vqsub
, 0000210, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_i64_su
),
21434 NUF(vqsubq
, 0000210, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_i64_su
),
21435 NUF(vrshl
, 0000500, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21436 NUF(vrshlq
, 0000500, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21437 NUF(vqrshl
, 0000510, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_rshl
),
21438 NUF(vqrshlq
, 0000510, 3, (RNQ
, oRNQ
, RNQ
), neon_rshl
),
21439 /* If not immediate, fall back to neon_dyadic_i64_su.
21440 shl_imm should accept I8 I16 I32 I64,
21441 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
21442 nUF(vshl
, _vshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_shl_imm
),
21443 nUF(vshlq
, _vshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_shl_imm
),
21444 nUF(vqshl
, _vqshl
, 3, (RNDQ
, oRNDQ
, RNDQ_I63b
), neon_qshl_imm
),
21445 nUF(vqshlq
, _vqshl
, 3, (RNQ
, oRNQ
, RNDQ_I63b
), neon_qshl_imm
),
21446 /* Logic ops, types optional & ignored. */
21447 nUF(vand
, _vand
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21448 nUF(vandq
, _vand
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21449 nUF(vbic
, _vbic
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21450 nUF(vbicq
, _vbic
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21451 nUF(vorr
, _vorr
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21452 nUF(vorrq
, _vorr
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21453 nUF(vorn
, _vorn
, 3, (RNDQ
, oRNDQ
, RNDQ_Ibig
), neon_logic
),
21454 nUF(vornq
, _vorn
, 3, (RNQ
, oRNQ
, RNDQ_Ibig
), neon_logic
),
21455 nUF(veor
, _veor
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_logic
),
21456 nUF(veorq
, _veor
, 3, (RNQ
, oRNQ
, RNQ
), neon_logic
),
21457 /* Bitfield ops, untyped. */
21458 NUF(vbsl
, 1100110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21459 NUF(vbslq
, 1100110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21460 NUF(vbit
, 1200110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21461 NUF(vbitq
, 1200110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21462 NUF(vbif
, 1300110, 3, (RNDQ
, RNDQ
, RNDQ
), neon_bitfield
),
21463 NUF(vbifq
, 1300110, 3, (RNQ
, RNQ
, RNQ
), neon_bitfield
),
21464 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
21465 nUF(vabd
, _vabd
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21466 nUF(vabdq
, _vabd
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21467 nUF(vmax
, _vmax
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21468 nUF(vmaxq
, _vmax
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21469 nUF(vmin
, _vmin
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_dyadic_if_su
),
21470 nUF(vminq
, _vmin
, 3, (RNQ
, oRNQ
, RNQ
), neon_dyadic_if_su
),
21471 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
21472 back to neon_dyadic_if_su. */
21473 nUF(vcge
, _vcge
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21474 nUF(vcgeq
, _vcge
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21475 nUF(vcgt
, _vcgt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp
),
21476 nUF(vcgtq
, _vcgt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp
),
21477 nUF(vclt
, _vclt
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21478 nUF(vcltq
, _vclt
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21479 nUF(vcle
, _vcle
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_cmp_inv
),
21480 nUF(vcleq
, _vcle
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_cmp_inv
),
21481 /* Comparison. Type I8 I16 I32 F32. */
21482 nUF(vceq
, _vceq
, 3, (RNDQ
, oRNDQ
, RNDQ_I0
), neon_ceq
),
21483 nUF(vceqq
, _vceq
, 3, (RNQ
, oRNQ
, RNDQ_I0
), neon_ceq
),
21484 /* As above, D registers only. */
21485 nUF(vpmax
, _vpmax
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21486 nUF(vpmin
, _vpmin
, 3, (RND
, oRND
, RND
), neon_dyadic_if_su_d
),
21487 /* Int and float variants, signedness unimportant. */
21488 nUF(vmlaq
, _vmla
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21489 nUF(vmlsq
, _vmls
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mac_maybe_scalar
),
21490 nUF(vpadd
, _vpadd
, 3, (RND
, oRND
, RND
), neon_dyadic_if_i_d
),
21491 /* Add/sub take types I8 I16 I32 I64 F32. */
21492 nUF(vaddq
, _vadd
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21493 nUF(vsubq
, _vsub
, 3, (RNQ
, oRNQ
, RNQ
), neon_addsub_if_i
),
21494 /* vtst takes sizes 8, 16, 32. */
21495 NUF(vtst
, 0000810, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_tst
),
21496 NUF(vtstq
, 0000810, 3, (RNQ
, oRNQ
, RNQ
), neon_tst
),
21497 /* VMUL takes I8 I16 I32 F32 P8. */
21498 nUF(vmulq
, _vmul
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_mul
),
21499 /* VQD{R}MULH takes S16 S32. */
21500 nUF(vqdmulh
, _vqdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21501 nUF(vqdmulhq
, _vqdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21502 nUF(vqrdmulh
, _vqrdmulh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qdmulh
),
21503 nUF(vqrdmulhq
, _vqrdmulh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qdmulh
),
21504 NUF(vacge
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21505 NUF(vacgeq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21506 NUF(vacgt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute
),
21507 NUF(vacgtq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute
),
21508 NUF(vaclt
, 0200e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21509 NUF(vacltq
, 0200e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21510 NUF(vacle
, 0000e10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_fcmp_absolute_inv
),
21511 NUF(vacleq
, 0000e10
, 3, (RNQ
, oRNQ
, RNQ
), neon_fcmp_absolute_inv
),
21512 NUF(vrecps
, 0000f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21513 NUF(vrecpsq
, 0000f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21514 NUF(vrsqrts
, 0200f10
, 3, (RNDQ
, oRNDQ
, RNDQ
), neon_step
),
21515 NUF(vrsqrtsq
, 0200f10
, 3, (RNQ
, oRNQ
, RNQ
), neon_step
),
21516 /* ARM v8.1 extension. */
21517 nUF (vqrdmlah
, _vqrdmlah
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21518 nUF (vqrdmlahq
, _vqrdmlah
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21519 nUF (vqrdmlsh
, _vqrdmlsh
, 3, (RNDQ
, oRNDQ
, RNDQ_RNSC
), neon_qrdmlah
),
21520 nUF (vqrdmlshq
, _vqrdmlsh
, 3, (RNQ
, oRNQ
, RNDQ_RNSC
), neon_qrdmlah
),
21522 /* Two address, int/float. Types S8 S16 S32 F32. */
21523 NUF(vabsq
, 1b10300
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21524 NUF(vnegq
, 1b10380
, 2, (RNQ
, RNQ
), neon_abs_neg
),
21526 /* Data processing with two registers and a shift amount. */
21527 /* Right shifts, and variants with rounding.
21528 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21529 NUF(vshr
, 0800010, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21530 NUF(vshrq
, 0800010, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21531 NUF(vrshr
, 0800210, 3, (RNDQ
, oRNDQ
, I64z
), neon_rshift_round_imm
),
21532 NUF(vrshrq
, 0800210, 3, (RNQ
, oRNQ
, I64z
), neon_rshift_round_imm
),
21533 NUF(vsra
, 0800110, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21534 NUF(vsraq
, 0800110, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21535 NUF(vrsra
, 0800310, 3, (RNDQ
, oRNDQ
, I64
), neon_rshift_round_imm
),
21536 NUF(vrsraq
, 0800310, 3, (RNQ
, oRNQ
, I64
), neon_rshift_round_imm
),
21537 /* Shift and insert. Sizes accepted 8 16 32 64. */
21538 NUF(vsli
, 1800510, 3, (RNDQ
, oRNDQ
, I63
), neon_sli
),
21539 NUF(vsliq
, 1800510, 3, (RNQ
, oRNQ
, I63
), neon_sli
),
21540 NUF(vsri
, 1800410, 3, (RNDQ
, oRNDQ
, I64
), neon_sri
),
21541 NUF(vsriq
, 1800410, 3, (RNQ
, oRNQ
, I64
), neon_sri
),
21542 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21543 NUF(vqshlu
, 1800610, 3, (RNDQ
, oRNDQ
, I63
), neon_qshlu_imm
),
21544 NUF(vqshluq
, 1800610, 3, (RNQ
, oRNQ
, I63
), neon_qshlu_imm
),
21545 /* Right shift immediate, saturating & narrowing, with rounding variants.
21546 Types accepted S16 S32 S64 U16 U32 U64. */
21547 NUF(vqshrn
, 0800910, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21548 NUF(vqrshrn
, 0800950, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow
),
21549 /* As above, unsigned. Types accepted S16 S32 S64. */
21550 NUF(vqshrun
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21551 NUF(vqrshrun
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_sat_narrow_u
),
21552 /* Right shift narrowing. Types accepted I16 I32 I64. */
21553 NUF(vshrn
, 0800810, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21554 NUF(vrshrn
, 0800850, 3, (RND
, RNQ
, I32z
), neon_rshift_narrow
),
21555 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21556 nUF(vshll
, _vshll
, 3, (RNQ
, RND
, I32
), neon_shll
),
21557 /* CVT with optional immediate for fixed-point variant. */
21558 nUF(vcvtq
, _vcvt
, 3, (RNQ
, RNQ
, oI32b
), neon_cvt
),
21560 nUF(vmvn
, _vmvn
, 2, (RNDQ
, RNDQ_Ibig
), neon_mvn
),
21561 nUF(vmvnq
, _vmvn
, 2, (RNQ
, RNDQ_Ibig
), neon_mvn
),
21563 /* Data processing, three registers of different lengths. */
21564 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21565 NUF(vabal
, 0800500, 3, (RNQ
, RND
, RND
), neon_abal
),
21566 NUF(vabdl
, 0800700, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21567 NUF(vaddl
, 0800000, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21568 NUF(vsubl
, 0800200, 3, (RNQ
, RND
, RND
), neon_dyadic_long
),
21569 /* If not scalar, fall back to neon_dyadic_long.
21570 Vector types as above, scalar types S16 S32 U16 U32. */
21571 nUF(vmlal
, _vmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21572 nUF(vmlsl
, _vmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mac_maybe_scalar_long
),
21573 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21574 NUF(vaddw
, 0800100, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21575 NUF(vsubw
, 0800300, 3, (RNQ
, oRNQ
, RND
), neon_dyadic_wide
),
21576 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21577 NUF(vaddhn
, 0800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21578 NUF(vraddhn
, 1800400, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21579 NUF(vsubhn
, 0800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21580 NUF(vrsubhn
, 1800600, 3, (RND
, RNQ
, RNQ
), neon_dyadic_narrow
),
21581 /* Saturating doubling multiplies. Types S16 S32. */
21582 nUF(vqdmlal
, _vqdmlal
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21583 nUF(vqdmlsl
, _vqdmlsl
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21584 nUF(vqdmull
, _vqdmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_mul_sat_scalar_long
),
21585 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21586 S16 S32 U16 U32. */
21587 nUF(vmull
, _vmull
, 3, (RNQ
, RND
, RND_RNSC
), neon_vmull
),
21589 /* Extract. Size 8. */
21590 NUF(vext
, 0b00000, 4, (RNDQ
, oRNDQ
, RNDQ
, I15
), neon_ext
),
21591 NUF(vextq
, 0b00000, 4, (RNQ
, oRNQ
, RNQ
, I15
), neon_ext
),
21593 /* Two registers, miscellaneous. */
21594 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21595 NUF(vrev64
, 1b00000
, 2, (RNDQ
, RNDQ
), neon_rev
),
21596 NUF(vrev64q
, 1b00000
, 2, (RNQ
, RNQ
), neon_rev
),
21597 NUF(vrev32
, 1b00080
, 2, (RNDQ
, RNDQ
), neon_rev
),
21598 NUF(vrev32q
, 1b00080
, 2, (RNQ
, RNQ
), neon_rev
),
21599 NUF(vrev16
, 1b00100
, 2, (RNDQ
, RNDQ
), neon_rev
),
21600 NUF(vrev16q
, 1b00100
, 2, (RNQ
, RNQ
), neon_rev
),
21601 /* Vector replicate. Sizes 8 16 32. */
21602 nCE(vdup
, _vdup
, 2, (RNDQ
, RR_RNSC
), neon_dup
),
21603 nCE(vdupq
, _vdup
, 2, (RNQ
, RR_RNSC
), neon_dup
),
21604 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21605 NUF(vmovl
, 0800a10
, 2, (RNQ
, RND
), neon_movl
),
21606 /* VMOVN. Types I16 I32 I64. */
21607 nUF(vmovn
, _vmovn
, 2, (RND
, RNQ
), neon_movn
),
21608 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21609 nUF(vqmovn
, _vqmovn
, 2, (RND
, RNQ
), neon_qmovn
),
21610 /* VQMOVUN. Types S16 S32 S64. */
21611 nUF(vqmovun
, _vqmovun
, 2, (RND
, RNQ
), neon_qmovun
),
21612 /* VZIP / VUZP. Sizes 8 16 32. */
21613 NUF(vzip
, 1b20180
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21614 NUF(vzipq
, 1b20180
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21615 NUF(vuzp
, 1b20100
, 2, (RNDQ
, RNDQ
), neon_zip_uzp
),
21616 NUF(vuzpq
, 1b20100
, 2, (RNQ
, RNQ
), neon_zip_uzp
),
21617 /* VQABS / VQNEG. Types S8 S16 S32. */
21618 NUF(vqabs
, 1b00700
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21619 NUF(vqabsq
, 1b00700
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21620 NUF(vqneg
, 1b00780
, 2, (RNDQ
, RNDQ
), neon_sat_abs_neg
),
21621 NUF(vqnegq
, 1b00780
, 2, (RNQ
, RNQ
), neon_sat_abs_neg
),
21622 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21623 NUF(vpadal
, 1b00600
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21624 NUF(vpadalq
, 1b00600
, 2, (RNQ
, RNQ
), neon_pair_long
),
21625 NUF(vpaddl
, 1b00200
, 2, (RNDQ
, RNDQ
), neon_pair_long
),
21626 NUF(vpaddlq
, 1b00200
, 2, (RNQ
, RNQ
), neon_pair_long
),
21627 /* Reciprocal estimates. Types U32 F16 F32. */
21628 NUF(vrecpe
, 1b30400
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21629 NUF(vrecpeq
, 1b30400
, 2, (RNQ
, RNQ
), neon_recip_est
),
21630 NUF(vrsqrte
, 1b30480
, 2, (RNDQ
, RNDQ
), neon_recip_est
),
21631 NUF(vrsqrteq
, 1b30480
, 2, (RNQ
, RNQ
), neon_recip_est
),
21632 /* VCLS. Types S8 S16 S32. */
21633 NUF(vcls
, 1b00400
, 2, (RNDQ
, RNDQ
), neon_cls
),
21634 NUF(vclsq
, 1b00400
, 2, (RNQ
, RNQ
), neon_cls
),
21635 /* VCLZ. Types I8 I16 I32. */
21636 NUF(vclz
, 1b00480
, 2, (RNDQ
, RNDQ
), neon_clz
),
21637 NUF(vclzq
, 1b00480
, 2, (RNQ
, RNQ
), neon_clz
),
21638 /* VCNT. Size 8. */
21639 NUF(vcnt
, 1b00500
, 2, (RNDQ
, RNDQ
), neon_cnt
),
21640 NUF(vcntq
, 1b00500
, 2, (RNQ
, RNQ
), neon_cnt
),
21641 /* Two address, untyped. */
21642 NUF(vswp
, 1b20000
, 2, (RNDQ
, RNDQ
), neon_swp
),
21643 NUF(vswpq
, 1b20000
, 2, (RNQ
, RNQ
), neon_swp
),
21644 /* VTRN. Sizes 8 16 32. */
21645 nUF(vtrn
, _vtrn
, 2, (RNDQ
, RNDQ
), neon_trn
),
21646 nUF(vtrnq
, _vtrn
, 2, (RNQ
, RNQ
), neon_trn
),
21648 /* Table lookup. Size 8. */
21649 NUF(vtbl
, 1b00800
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21650 NUF(vtbx
, 1b00840
, 3, (RND
, NRDLST
, RND
), neon_tbl_tbx
),
21652 #undef THUMB_VARIANT
21653 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21655 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21657 /* Neon element/structure load/store. */
21658 nUF(vld1
, _vld1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21659 nUF(vst1
, _vst1
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21660 nUF(vld2
, _vld2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21661 nUF(vst2
, _vst2
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21662 nUF(vld3
, _vld3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21663 nUF(vst3
, _vst3
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21664 nUF(vld4
, _vld4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21665 nUF(vst4
, _vst4
, 2, (NSTRLST
, ADDR
), neon_ldx_stx
),
21667 #undef THUMB_VARIANT
21668 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21670 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21671 cCE("fconsts", eb00a00
, 2, (RVS
, I255
), vfp_sp_const
),
21672 cCE("fshtos", eba0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21673 cCE("fsltos", eba0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21674 cCE("fuhtos", ebb0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21675 cCE("fultos", ebb0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21676 cCE("ftoshs", ebe0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21677 cCE("ftosls", ebe0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21678 cCE("ftouhs", ebf0a40
, 2, (RVS
, I16z
), vfp_sp_conv_16
),
21679 cCE("ftouls", ebf0ac0
, 2, (RVS
, I32
), vfp_sp_conv_32
),
21681 #undef THUMB_VARIANT
21682 #define THUMB_VARIANT & fpu_vfp_ext_v3
21684 #define ARM_VARIANT & fpu_vfp_ext_v3
21686 cCE("fconstd", eb00b00
, 2, (RVD
, I255
), vfp_dp_const
),
21687 cCE("fshtod", eba0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21688 cCE("fsltod", eba0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21689 cCE("fuhtod", ebb0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21690 cCE("fultod", ebb0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21691 cCE("ftoshd", ebe0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21692 cCE("ftosld", ebe0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21693 cCE("ftouhd", ebf0b40
, 2, (RVD
, I16z
), vfp_dp_conv_16
),
21694 cCE("ftould", ebf0bc0
, 2, (RVD
, I32
), vfp_dp_conv_32
),
21697 #define ARM_VARIANT & fpu_vfp_ext_fma
21698 #undef THUMB_VARIANT
21699 #define THUMB_VARIANT & fpu_vfp_ext_fma
21700 /* Mnemonics shared by Neon and VFP. These are included in the
21701 VFP FMA variant; NEON and VFP FMA always includes the NEON
21702 FMA instructions. */
21703 nCEF(vfma
, _vfma
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21704 nCEF(vfms
, _vfms
, 3, (RNSDQ
, oRNSDQ
, RNSDQ
), neon_fmac
),
21705 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21706 the v form should always be used. */
21707 cCE("ffmas", ea00a00
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21708 cCE("ffnmas", ea00a40
, 3, (RVS
, RVS
, RVS
), vfp_sp_dyadic
),
21709 cCE("ffmad", ea00b00
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21710 cCE("ffnmad", ea00b40
, 3, (RVD
, RVD
, RVD
), vfp_dp_rd_rn_rm
),
21711 nCE(vfnma
, _vfnma
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21712 nCE(vfnms
, _vfnms
, 3, (RVSD
, RVSD
, RVSD
), vfp_nsyn_nmul
),
21714 #undef THUMB_VARIANT
21716 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21718 cCE("mia", e200010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21719 cCE("miaph", e280010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21720 cCE("miabb", e2c0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21721 cCE("miabt", e2d0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21722 cCE("miatb", e2e0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21723 cCE("miatt", e2f0010
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mia
),
21724 cCE("mar", c400000
, 3, (RXA
, RRnpc
, RRnpc
), xsc_mar
),
21725 cCE("mra", c500000
, 3, (RRnpc
, RRnpc
, RXA
), xsc_mra
),
21728 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21730 cCE("tandcb", e13f130
, 1, (RR
), iwmmxt_tandorc
),
21731 cCE("tandch", e53f130
, 1, (RR
), iwmmxt_tandorc
),
21732 cCE("tandcw", e93f130
, 1, (RR
), iwmmxt_tandorc
),
21733 cCE("tbcstb", e400010
, 2, (RIWR
, RR
), rn_rd
),
21734 cCE("tbcsth", e400050
, 2, (RIWR
, RR
), rn_rd
),
21735 cCE("tbcstw", e400090
, 2, (RIWR
, RR
), rn_rd
),
21736 cCE("textrcb", e130170
, 2, (RR
, I7
), iwmmxt_textrc
),
21737 cCE("textrch", e530170
, 2, (RR
, I7
), iwmmxt_textrc
),
21738 cCE("textrcw", e930170
, 2, (RR
, I7
), iwmmxt_textrc
),
21739 cCE("textrmub",e100070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21740 cCE("textrmuh",e500070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21741 cCE("textrmuw",e900070
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21742 cCE("textrmsb",e100078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21743 cCE("textrmsh",e500078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21744 cCE("textrmsw",e900078
, 3, (RR
, RIWR
, I7
), iwmmxt_textrm
),
21745 cCE("tinsrb", e600010
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21746 cCE("tinsrh", e600050
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21747 cCE("tinsrw", e600090
, 3, (RIWR
, RR
, I7
), iwmmxt_tinsr
),
21748 cCE("tmcr", e000110
, 2, (RIWC_RIWG
, RR
), rn_rd
),
21749 cCE("tmcrr", c400000
, 3, (RIWR
, RR
, RR
), rm_rd_rn
),
21750 cCE("tmia", e200010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21751 cCE("tmiaph", e280010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21752 cCE("tmiabb", e2c0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21753 cCE("tmiabt", e2d0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21754 cCE("tmiatb", e2e0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21755 cCE("tmiatt", e2f0010
, 3, (RIWR
, RR
, RR
), iwmmxt_tmia
),
21756 cCE("tmovmskb",e100030
, 2, (RR
, RIWR
), rd_rn
),
21757 cCE("tmovmskh",e500030
, 2, (RR
, RIWR
), rd_rn
),
21758 cCE("tmovmskw",e900030
, 2, (RR
, RIWR
), rd_rn
),
21759 cCE("tmrc", e100110
, 2, (RR
, RIWC_RIWG
), rd_rn
),
21760 cCE("tmrrc", c500000
, 3, (RR
, RR
, RIWR
), rd_rn_rm
),
21761 cCE("torcb", e13f150
, 1, (RR
), iwmmxt_tandorc
),
21762 cCE("torch", e53f150
, 1, (RR
), iwmmxt_tandorc
),
21763 cCE("torcw", e93f150
, 1, (RR
), iwmmxt_tandorc
),
21764 cCE("waccb", e0001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21765 cCE("wacch", e4001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21766 cCE("waccw", e8001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21767 cCE("waddbss", e300180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21768 cCE("waddb", e000180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21769 cCE("waddbus", e100180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21770 cCE("waddhss", e700180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21771 cCE("waddh", e400180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21772 cCE("waddhus", e500180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21773 cCE("waddwss", eb00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21774 cCE("waddw", e800180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21775 cCE("waddwus", e900180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21776 cCE("waligni", e000020
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_waligni
),
21777 cCE("walignr0",e800020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21778 cCE("walignr1",e900020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21779 cCE("walignr2",ea00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21780 cCE("walignr3",eb00020
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21781 cCE("wand", e200000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21782 cCE("wandn", e300000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21783 cCE("wavg2b", e800000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21784 cCE("wavg2br", e900000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21785 cCE("wavg2h", ec00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21786 cCE("wavg2hr", ed00000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21787 cCE("wcmpeqb", e000060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21788 cCE("wcmpeqh", e400060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21789 cCE("wcmpeqw", e800060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21790 cCE("wcmpgtub",e100060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21791 cCE("wcmpgtuh",e500060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21792 cCE("wcmpgtuw",e900060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21793 cCE("wcmpgtsb",e300060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21794 cCE("wcmpgtsh",e700060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21795 cCE("wcmpgtsw",eb00060
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21796 cCE("wldrb", c100000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21797 cCE("wldrh", c500000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21798 cCE("wldrw", c100100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21799 cCE("wldrd", c500100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21800 cCE("wmacs", e600100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21801 cCE("wmacsz", e700100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21802 cCE("wmacu", e400100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21803 cCE("wmacuz", e500100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21804 cCE("wmadds", ea00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21805 cCE("wmaddu", e800100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21806 cCE("wmaxsb", e200160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21807 cCE("wmaxsh", e600160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21808 cCE("wmaxsw", ea00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21809 cCE("wmaxub", e000160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21810 cCE("wmaxuh", e400160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21811 cCE("wmaxuw", e800160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21812 cCE("wminsb", e300160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21813 cCE("wminsh", e700160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21814 cCE("wminsw", eb00160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21815 cCE("wminub", e100160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21816 cCE("wminuh", e500160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21817 cCE("wminuw", e900160
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21818 cCE("wmov", e000000
, 2, (RIWR
, RIWR
), iwmmxt_wmov
),
21819 cCE("wmulsm", e300100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21820 cCE("wmulsl", e200100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21821 cCE("wmulum", e100100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21822 cCE("wmulul", e000100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21823 cCE("wor", e000000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21824 cCE("wpackhss",e700080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21825 cCE("wpackhus",e500080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21826 cCE("wpackwss",eb00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21827 cCE("wpackwus",e900080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21828 cCE("wpackdss",ef00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21829 cCE("wpackdus",ed00080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21830 cCE("wrorh", e700040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21831 cCE("wrorhg", e700148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21832 cCE("wrorw", eb00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21833 cCE("wrorwg", eb00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21834 cCE("wrord", ef00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21835 cCE("wrordg", ef00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21836 cCE("wsadb", e000120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21837 cCE("wsadbz", e100120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21838 cCE("wsadh", e400120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21839 cCE("wsadhz", e500120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21840 cCE("wshufh", e0001e0
, 3, (RIWR
, RIWR
, I255
), iwmmxt_wshufh
),
21841 cCE("wsllh", e500040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21842 cCE("wsllhg", e500148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21843 cCE("wsllw", e900040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21844 cCE("wsllwg", e900148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21845 cCE("wslld", ed00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21846 cCE("wslldg", ed00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21847 cCE("wsrah", e400040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21848 cCE("wsrahg", e400148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21849 cCE("wsraw", e800040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21850 cCE("wsrawg", e800148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21851 cCE("wsrad", ec00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21852 cCE("wsradg", ec00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21853 cCE("wsrlh", e600040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21854 cCE("wsrlhg", e600148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21855 cCE("wsrlw", ea00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21856 cCE("wsrlwg", ea00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21857 cCE("wsrld", ee00040
, 3, (RIWR
, RIWR
, RIWR_I32z
),iwmmxt_wrwrwr_or_imm5
),
21858 cCE("wsrldg", ee00148
, 3, (RIWR
, RIWR
, RIWG
), rd_rn_rm
),
21859 cCE("wstrb", c000000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21860 cCE("wstrh", c400000
, 2, (RIWR
, ADDR
), iwmmxt_wldstbh
),
21861 cCE("wstrw", c000100
, 2, (RIWR_RIWC
, ADDR
), iwmmxt_wldstw
),
21862 cCE("wstrd", c400100
, 2, (RIWR
, ADDR
), iwmmxt_wldstd
),
21863 cCE("wsubbss", e3001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21864 cCE("wsubb", e0001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21865 cCE("wsubbus", e1001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21866 cCE("wsubhss", e7001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21867 cCE("wsubh", e4001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21868 cCE("wsubhus", e5001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21869 cCE("wsubwss", eb001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21870 cCE("wsubw", e8001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21871 cCE("wsubwus", e9001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21872 cCE("wunpckehub",e0000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21873 cCE("wunpckehuh",e4000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21874 cCE("wunpckehuw",e8000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21875 cCE("wunpckehsb",e2000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21876 cCE("wunpckehsh",e6000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21877 cCE("wunpckehsw",ea000c0
, 2, (RIWR
, RIWR
), rd_rn
),
21878 cCE("wunpckihb", e1000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21879 cCE("wunpckihh", e5000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21880 cCE("wunpckihw", e9000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21881 cCE("wunpckelub",e0000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21882 cCE("wunpckeluh",e4000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21883 cCE("wunpckeluw",e8000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21884 cCE("wunpckelsb",e2000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21885 cCE("wunpckelsh",e6000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21886 cCE("wunpckelsw",ea000e0
, 2, (RIWR
, RIWR
), rd_rn
),
21887 cCE("wunpckilb", e1000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21888 cCE("wunpckilh", e5000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21889 cCE("wunpckilw", e9000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21890 cCE("wxor", e100000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21891 cCE("wzero", e300000
, 1, (RIWR
), iwmmxt_wzero
),
21894 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21896 cCE("torvscb", e12f190
, 1, (RR
), iwmmxt_tandorc
),
21897 cCE("torvsch", e52f190
, 1, (RR
), iwmmxt_tandorc
),
21898 cCE("torvscw", e92f190
, 1, (RR
), iwmmxt_tandorc
),
21899 cCE("wabsb", e2001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21900 cCE("wabsh", e6001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21901 cCE("wabsw", ea001c0
, 2, (RIWR
, RIWR
), rd_rn
),
21902 cCE("wabsdiffb", e1001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21903 cCE("wabsdiffh", e5001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21904 cCE("wabsdiffw", e9001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21905 cCE("waddbhusl", e2001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21906 cCE("waddbhusm", e6001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21907 cCE("waddhc", e600180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21908 cCE("waddwc", ea00180
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21909 cCE("waddsubhx", ea001a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21910 cCE("wavg4", e400000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21911 cCE("wavg4r", e500000
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21912 cCE("wmaddsn", ee00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21913 cCE("wmaddsx", eb00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21914 cCE("wmaddun", ec00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21915 cCE("wmaddux", e900100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21916 cCE("wmerge", e000080
, 4, (RIWR
, RIWR
, RIWR
, I7
), iwmmxt_wmerge
),
21917 cCE("wmiabb", e0000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21918 cCE("wmiabt", e1000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21919 cCE("wmiatb", e2000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21920 cCE("wmiatt", e3000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21921 cCE("wmiabbn", e4000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21922 cCE("wmiabtn", e5000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21923 cCE("wmiatbn", e6000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21924 cCE("wmiattn", e7000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21925 cCE("wmiawbb", e800120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21926 cCE("wmiawbt", e900120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21927 cCE("wmiawtb", ea00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21928 cCE("wmiawtt", eb00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21929 cCE("wmiawbbn", ec00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21930 cCE("wmiawbtn", ed00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21931 cCE("wmiawtbn", ee00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21932 cCE("wmiawttn", ef00120
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21933 cCE("wmulsmr", ef00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21934 cCE("wmulumr", ed00100
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21935 cCE("wmulwumr", ec000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21936 cCE("wmulwsmr", ee000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21937 cCE("wmulwum", ed000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21938 cCE("wmulwsm", ef000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21939 cCE("wmulwl", eb000c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21940 cCE("wqmiabb", e8000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21941 cCE("wqmiabt", e9000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21942 cCE("wqmiatb", ea000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21943 cCE("wqmiatt", eb000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21944 cCE("wqmiabbn", ec000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21945 cCE("wqmiabtn", ed000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21946 cCE("wqmiatbn", ee000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21947 cCE("wqmiattn", ef000a0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21948 cCE("wqmulm", e100080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21949 cCE("wqmulmr", e300080
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21950 cCE("wqmulwm", ec000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21951 cCE("wqmulwmr", ee000e0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21952 cCE("wsubaddhx", ed001c0
, 3, (RIWR
, RIWR
, RIWR
), rd_rn_rm
),
21955 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21957 cCE("cfldrs", c100400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21958 cCE("cfldrd", c500400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21959 cCE("cfldr32", c100500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21960 cCE("cfldr64", c500500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21961 cCE("cfstrs", c000400
, 2, (RMF
, ADDRGLDC
), rd_cpaddr
),
21962 cCE("cfstrd", c400400
, 2, (RMD
, ADDRGLDC
), rd_cpaddr
),
21963 cCE("cfstr32", c000500
, 2, (RMFX
, ADDRGLDC
), rd_cpaddr
),
21964 cCE("cfstr64", c400500
, 2, (RMDX
, ADDRGLDC
), rd_cpaddr
),
21965 cCE("cfmvsr", e000450
, 2, (RMF
, RR
), rn_rd
),
21966 cCE("cfmvrs", e100450
, 2, (RR
, RMF
), rd_rn
),
21967 cCE("cfmvdlr", e000410
, 2, (RMD
, RR
), rn_rd
),
21968 cCE("cfmvrdl", e100410
, 2, (RR
, RMD
), rd_rn
),
21969 cCE("cfmvdhr", e000430
, 2, (RMD
, RR
), rn_rd
),
21970 cCE("cfmvrdh", e100430
, 2, (RR
, RMD
), rd_rn
),
21971 cCE("cfmv64lr",e000510
, 2, (RMDX
, RR
), rn_rd
),
21972 cCE("cfmvr64l",e100510
, 2, (RR
, RMDX
), rd_rn
),
21973 cCE("cfmv64hr",e000530
, 2, (RMDX
, RR
), rn_rd
),
21974 cCE("cfmvr64h",e100530
, 2, (RR
, RMDX
), rd_rn
),
21975 cCE("cfmval32",e200440
, 2, (RMAX
, RMFX
), rd_rn
),
21976 cCE("cfmv32al",e100440
, 2, (RMFX
, RMAX
), rd_rn
),
21977 cCE("cfmvam32",e200460
, 2, (RMAX
, RMFX
), rd_rn
),
21978 cCE("cfmv32am",e100460
, 2, (RMFX
, RMAX
), rd_rn
),
21979 cCE("cfmvah32",e200480
, 2, (RMAX
, RMFX
), rd_rn
),
21980 cCE("cfmv32ah",e100480
, 2, (RMFX
, RMAX
), rd_rn
),
21981 cCE("cfmva32", e2004a0
, 2, (RMAX
, RMFX
), rd_rn
),
21982 cCE("cfmv32a", e1004a0
, 2, (RMFX
, RMAX
), rd_rn
),
21983 cCE("cfmva64", e2004c0
, 2, (RMAX
, RMDX
), rd_rn
),
21984 cCE("cfmv64a", e1004c0
, 2, (RMDX
, RMAX
), rd_rn
),
21985 cCE("cfmvsc32",e2004e0
, 2, (RMDS
, RMDX
), mav_dspsc
),
21986 cCE("cfmv32sc",e1004e0
, 2, (RMDX
, RMDS
), rd
),
21987 cCE("cfcpys", e000400
, 2, (RMF
, RMF
), rd_rn
),
21988 cCE("cfcpyd", e000420
, 2, (RMD
, RMD
), rd_rn
),
21989 cCE("cfcvtsd", e000460
, 2, (RMD
, RMF
), rd_rn
),
21990 cCE("cfcvtds", e000440
, 2, (RMF
, RMD
), rd_rn
),
21991 cCE("cfcvt32s",e000480
, 2, (RMF
, RMFX
), rd_rn
),
21992 cCE("cfcvt32d",e0004a0
, 2, (RMD
, RMFX
), rd_rn
),
21993 cCE("cfcvt64s",e0004c0
, 2, (RMF
, RMDX
), rd_rn
),
21994 cCE("cfcvt64d",e0004e0
, 2, (RMD
, RMDX
), rd_rn
),
21995 cCE("cfcvts32",e100580
, 2, (RMFX
, RMF
), rd_rn
),
21996 cCE("cfcvtd32",e1005a0
, 2, (RMFX
, RMD
), rd_rn
),
21997 cCE("cftruncs32",e1005c0
, 2, (RMFX
, RMF
), rd_rn
),
21998 cCE("cftruncd32",e1005e0
, 2, (RMFX
, RMD
), rd_rn
),
21999 cCE("cfrshl32",e000550
, 3, (RMFX
, RMFX
, RR
), mav_triple
),
22000 cCE("cfrshl64",e000570
, 3, (RMDX
, RMDX
, RR
), mav_triple
),
22001 cCE("cfsh32", e000500
, 3, (RMFX
, RMFX
, I63s
), mav_shift
),
22002 cCE("cfsh64", e200500
, 3, (RMDX
, RMDX
, I63s
), mav_shift
),
22003 cCE("cfcmps", e100490
, 3, (RR
, RMF
, RMF
), rd_rn_rm
),
22004 cCE("cfcmpd", e1004b0
, 3, (RR
, RMD
, RMD
), rd_rn_rm
),
22005 cCE("cfcmp32", e100590
, 3, (RR
, RMFX
, RMFX
), rd_rn_rm
),
22006 cCE("cfcmp64", e1005b0
, 3, (RR
, RMDX
, RMDX
), rd_rn_rm
),
22007 cCE("cfabss", e300400
, 2, (RMF
, RMF
), rd_rn
),
22008 cCE("cfabsd", e300420
, 2, (RMD
, RMD
), rd_rn
),
22009 cCE("cfnegs", e300440
, 2, (RMF
, RMF
), rd_rn
),
22010 cCE("cfnegd", e300460
, 2, (RMD
, RMD
), rd_rn
),
22011 cCE("cfadds", e300480
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22012 cCE("cfaddd", e3004a0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22013 cCE("cfsubs", e3004c0
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22014 cCE("cfsubd", e3004e0
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22015 cCE("cfmuls", e100400
, 3, (RMF
, RMF
, RMF
), rd_rn_rm
),
22016 cCE("cfmuld", e100420
, 3, (RMD
, RMD
, RMD
), rd_rn_rm
),
22017 cCE("cfabs32", e300500
, 2, (RMFX
, RMFX
), rd_rn
),
22018 cCE("cfabs64", e300520
, 2, (RMDX
, RMDX
), rd_rn
),
22019 cCE("cfneg32", e300540
, 2, (RMFX
, RMFX
), rd_rn
),
22020 cCE("cfneg64", e300560
, 2, (RMDX
, RMDX
), rd_rn
),
22021 cCE("cfadd32", e300580
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22022 cCE("cfadd64", e3005a0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22023 cCE("cfsub32", e3005c0
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22024 cCE("cfsub64", e3005e0
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22025 cCE("cfmul32", e100500
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22026 cCE("cfmul64", e100520
, 3, (RMDX
, RMDX
, RMDX
), rd_rn_rm
),
22027 cCE("cfmac32", e100540
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22028 cCE("cfmsc32", e100560
, 3, (RMFX
, RMFX
, RMFX
), rd_rn_rm
),
22029 cCE("cfmadd32",e000600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22030 cCE("cfmsub32",e100600
, 4, (RMAX
, RMFX
, RMFX
, RMFX
), mav_quad
),
22031 cCE("cfmadda32", e200600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22032 cCE("cfmsuba32", e300600
, 4, (RMAX
, RMAX
, RMFX
, RMFX
), mav_quad
),
22034 /* ARMv8.5-A instructions. */
22036 #define ARM_VARIANT & arm_ext_sb
22037 #undef THUMB_VARIANT
22038 #define THUMB_VARIANT & arm_ext_sb
22039 TUF("sb", 57ff070
, f3bf8f70
, 0, (), noargs
, noargs
),
22042 #define ARM_VARIANT & arm_ext_predres
22043 #undef THUMB_VARIANT
22044 #define THUMB_VARIANT & arm_ext_predres
22045 CE("cfprctx", e070f93
, 1, (RRnpc
), rd
),
22046 CE("dvprctx", e070fb3
, 1, (RRnpc
), rd
),
22047 CE("cpprctx", e070ff3
, 1, (RRnpc
), rd
),
22049 /* ARMv8-M instructions. */
22051 #define ARM_VARIANT NULL
22052 #undef THUMB_VARIANT
22053 #define THUMB_VARIANT & arm_ext_v8m
22054 ToU("sg", e97fe97f
, 0, (), noargs
),
22055 ToC("blxns", 4784, 1, (RRnpc
), t_blx
),
22056 ToC("bxns", 4704, 1, (RRnpc
), t_bx
),
22057 ToC("tt", e840f000
, 2, (RRnpc
, RRnpc
), tt
),
22058 ToC("ttt", e840f040
, 2, (RRnpc
, RRnpc
), tt
),
22059 ToC("tta", e840f080
, 2, (RRnpc
, RRnpc
), tt
),
22060 ToC("ttat", e840f0c0
, 2, (RRnpc
, RRnpc
), tt
),
22062 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
22063 instructions behave as nop if no VFP is present. */
22064 #undef THUMB_VARIANT
22065 #define THUMB_VARIANT & arm_ext_v8m_main
22066 ToC("vlldm", ec300a00
, 1, (RRnpc
), rn
),
22067 ToC("vlstm", ec200a00
, 1, (RRnpc
), rn
),
22069 /* Armv8.1-M Mainline instructions. */
22070 #undef THUMB_VARIANT
22071 #define THUMB_VARIANT & arm_ext_v8_1m_main
22072 toC("bf", _bf
, 2, (EXPs
, EXPs
), t_branch_future
),
22073 toU("bfcsel", _bfcsel
, 4, (EXPs
, EXPs
, EXPs
, COND
), t_branch_future
),
22074 toC("bfx", _bfx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22075 toC("bfl", _bfl
, 2, (EXPs
, EXPs
), t_branch_future
),
22076 toC("bflx", _bflx
, 2, (EXPs
, RRnpcsp
), t_branch_future
),
22078 toU("dls", _dls
, 2, (LR
, RRnpcsp
), t_loloop
),
22079 toU("wls", _wls
, 3, (LR
, RRnpcsp
, EXP
), t_loloop
),
22080 toU("le", _le
, 2, (oLR
, EXP
), t_loloop
),
22082 ToC("clrm", e89f0000
, 1, (CLRMLST
), t_clrm
),
22083 ToC("vscclrm", ec9f0a00
, 1, (VRSDVLST
), t_vscclrm
)
22086 #undef THUMB_VARIANT
22118 /* MD interface: bits in the object file. */
22120 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
22121 for use in the a.out file, and stores them in the array pointed to by buf.
22122 This knows about the endian-ness of the target machine and does
22123 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
22124 2 (short) and 4 (long) Floating numbers are put out as a series of
22125 LITTLENUMS (shorts, here at least). */
22128 md_number_to_chars (char * buf
, valueT val
, int n
)
22130 if (target_big_endian
)
22131 number_to_chars_bigendian (buf
, val
, n
);
22133 number_to_chars_littleendian (buf
, val
, n
);
22137 md_chars_to_number (char * buf
, int n
)
22140 unsigned char * where
= (unsigned char *) buf
;
22142 if (target_big_endian
)
22147 result
|= (*where
++ & 255);
22155 result
|= (where
[n
] & 255);
22162 /* MD interface: Sections. */
22164 /* Calculate the maximum variable size (i.e., excluding fr_fix)
22165 that an rs_machine_dependent frag may reach. */
22168 arm_frag_max_var (fragS
*fragp
)
22170 /* We only use rs_machine_dependent for variable-size Thumb instructions,
22171 which are either THUMB_SIZE (2) or INSN_SIZE (4).
22173 Note that we generate relaxable instructions even for cases that don't
22174 really need it, like an immediate that's a trivial constant. So we're
22175 overestimating the instruction size for some of those cases. Rather
22176 than putting more intelligence here, it would probably be better to
22177 avoid generating a relaxation frag in the first place when it can be
22178 determined up front that a short instruction will suffice. */
22180 gas_assert (fragp
->fr_type
== rs_machine_dependent
);
22184 /* Estimate the size of a frag before relaxing. Assume everything fits in
22188 md_estimate_size_before_relax (fragS
* fragp
,
22189 segT segtype ATTRIBUTE_UNUSED
)
22195 /* Convert a machine dependent frag. */
22198 md_convert_frag (bfd
*abfd
, segT asec ATTRIBUTE_UNUSED
, fragS
*fragp
)
22200 unsigned long insn
;
22201 unsigned long old_op
;
22209 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22211 old_op
= bfd_get_16(abfd
, buf
);
22212 if (fragp
->fr_symbol
)
22214 exp
.X_op
= O_symbol
;
22215 exp
.X_add_symbol
= fragp
->fr_symbol
;
22219 exp
.X_op
= O_constant
;
22221 exp
.X_add_number
= fragp
->fr_offset
;
22222 opcode
= fragp
->fr_subtype
;
22225 case T_MNEM_ldr_pc
:
22226 case T_MNEM_ldr_pc2
:
22227 case T_MNEM_ldr_sp
:
22228 case T_MNEM_str_sp
:
22235 if (fragp
->fr_var
== 4)
22237 insn
= THUMB_OP32 (opcode
);
22238 if ((old_op
>> 12) == 4 || (old_op
>> 12) == 9)
22240 insn
|= (old_op
& 0x700) << 4;
22244 insn
|= (old_op
& 7) << 12;
22245 insn
|= (old_op
& 0x38) << 13;
22247 insn
|= 0x00000c00;
22248 put_thumb32_insn (buf
, insn
);
22249 reloc_type
= BFD_RELOC_ARM_T32_OFFSET_IMM
;
22253 reloc_type
= BFD_RELOC_ARM_THUMB_OFFSET
;
22255 pc_rel
= (opcode
== T_MNEM_ldr_pc2
);
22258 if (fragp
->fr_var
== 4)
22260 insn
= THUMB_OP32 (opcode
);
22261 insn
|= (old_op
& 0xf0) << 4;
22262 put_thumb32_insn (buf
, insn
);
22263 reloc_type
= BFD_RELOC_ARM_T32_ADD_PC12
;
22267 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22268 exp
.X_add_number
-= 4;
22276 if (fragp
->fr_var
== 4)
22278 int r0off
= (opcode
== T_MNEM_mov
22279 || opcode
== T_MNEM_movs
) ? 0 : 8;
22280 insn
= THUMB_OP32 (opcode
);
22281 insn
= (insn
& 0xe1ffffff) | 0x10000000;
22282 insn
|= (old_op
& 0x700) << r0off
;
22283 put_thumb32_insn (buf
, insn
);
22284 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22288 reloc_type
= BFD_RELOC_ARM_THUMB_IMM
;
22293 if (fragp
->fr_var
== 4)
22295 insn
= THUMB_OP32(opcode
);
22296 put_thumb32_insn (buf
, insn
);
22297 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH25
;
22300 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH12
;
22304 if (fragp
->fr_var
== 4)
22306 insn
= THUMB_OP32(opcode
);
22307 insn
|= (old_op
& 0xf00) << 14;
22308 put_thumb32_insn (buf
, insn
);
22309 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH20
;
22312 reloc_type
= BFD_RELOC_THUMB_PCREL_BRANCH9
;
22315 case T_MNEM_add_sp
:
22316 case T_MNEM_add_pc
:
22317 case T_MNEM_inc_sp
:
22318 case T_MNEM_dec_sp
:
22319 if (fragp
->fr_var
== 4)
22321 /* ??? Choose between add and addw. */
22322 insn
= THUMB_OP32 (opcode
);
22323 insn
|= (old_op
& 0xf0) << 4;
22324 put_thumb32_insn (buf
, insn
);
22325 if (opcode
== T_MNEM_add_pc
)
22326 reloc_type
= BFD_RELOC_ARM_T32_IMM12
;
22328 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22331 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22339 if (fragp
->fr_var
== 4)
22341 insn
= THUMB_OP32 (opcode
);
22342 insn
|= (old_op
& 0xf0) << 4;
22343 insn
|= (old_op
& 0xf) << 16;
22344 put_thumb32_insn (buf
, insn
);
22345 if (insn
& (1 << 20))
22346 reloc_type
= BFD_RELOC_ARM_T32_ADD_IMM
;
22348 reloc_type
= BFD_RELOC_ARM_T32_IMMEDIATE
;
22351 reloc_type
= BFD_RELOC_ARM_THUMB_ADD
;
22357 fixp
= fix_new_exp (fragp
, fragp
->fr_fix
, fragp
->fr_var
, &exp
, pc_rel
,
22358 (enum bfd_reloc_code_real
) reloc_type
);
22359 fixp
->fx_file
= fragp
->fr_file
;
22360 fixp
->fx_line
= fragp
->fr_line
;
22361 fragp
->fr_fix
+= fragp
->fr_var
;
22363 /* Set whether we use thumb-2 ISA based on final relaxation results. */
22364 if (thumb_mode
&& fragp
->fr_var
== 4 && no_cpu_selected ()
22365 && !ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_t2
))
22366 ARM_MERGE_FEATURE_SETS (arm_arch_used
, thumb_arch_used
, arm_ext_v6t2
);
22369 /* Return the size of a relaxable immediate operand instruction.
22370 SHIFT and SIZE specify the form of the allowable immediate. */
22372 relax_immediate (fragS
*fragp
, int size
, int shift
)
22378 /* ??? Should be able to do better than this. */
22379 if (fragp
->fr_symbol
)
22382 low
= (1 << shift
) - 1;
22383 mask
= (1 << (shift
+ size
)) - (1 << shift
);
22384 offset
= fragp
->fr_offset
;
22385 /* Force misaligned offsets to 32-bit variant. */
22388 if (offset
& ~mask
)
22393 /* Get the address of a symbol during relaxation. */
22395 relaxed_symbol_addr (fragS
*fragp
, long stretch
)
22401 sym
= fragp
->fr_symbol
;
22402 sym_frag
= symbol_get_frag (sym
);
22403 know (S_GET_SEGMENT (sym
) != absolute_section
22404 || sym_frag
== &zero_address_frag
);
22405 addr
= S_GET_VALUE (sym
) + fragp
->fr_offset
;
22407 /* If frag has yet to be reached on this pass, assume it will
22408 move by STRETCH just as we did. If this is not so, it will
22409 be because some frag between grows, and that will force
22413 && sym_frag
->relax_marker
!= fragp
->relax_marker
)
22417 /* Adjust stretch for any alignment frag. Note that if have
22418 been expanding the earlier code, the symbol may be
22419 defined in what appears to be an earlier frag. FIXME:
22420 This doesn't handle the fr_subtype field, which specifies
22421 a maximum number of bytes to skip when doing an
22423 for (f
= fragp
; f
!= NULL
&& f
!= sym_frag
; f
= f
->fr_next
)
22425 if (f
->fr_type
== rs_align
|| f
->fr_type
== rs_align_code
)
22428 stretch
= - ((- stretch
)
22429 & ~ ((1 << (int) f
->fr_offset
) - 1));
22431 stretch
&= ~ ((1 << (int) f
->fr_offset
) - 1);
22443 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
22446 relax_adr (fragS
*fragp
, asection
*sec
, long stretch
)
22451 /* Assume worst case for symbols not known to be in the same section. */
22452 if (fragp
->fr_symbol
== NULL
22453 || !S_IS_DEFINED (fragp
->fr_symbol
)
22454 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22455 || S_IS_WEAK (fragp
->fr_symbol
))
22458 val
= relaxed_symbol_addr (fragp
, stretch
);
22459 addr
= fragp
->fr_address
+ fragp
->fr_fix
;
22460 addr
= (addr
+ 4) & ~3;
22461 /* Force misaligned targets to 32-bit variant. */
22465 if (val
< 0 || val
> 1020)
22470 /* Return the size of a relaxable add/sub immediate instruction. */
22472 relax_addsub (fragS
*fragp
, asection
*sec
)
22477 buf
= fragp
->fr_literal
+ fragp
->fr_fix
;
22478 op
= bfd_get_16(sec
->owner
, buf
);
22479 if ((op
& 0xf) == ((op
>> 4) & 0xf))
22480 return relax_immediate (fragp
, 8, 0);
22482 return relax_immediate (fragp
, 3, 0);
22485 /* Return TRUE iff the definition of symbol S could be pre-empted
22486 (overridden) at link or load time. */
22488 symbol_preemptible (symbolS
*s
)
22490 /* Weak symbols can always be pre-empted. */
22494 /* Non-global symbols cannot be pre-empted. */
22495 if (! S_IS_EXTERNAL (s
))
22499 /* In ELF, a global symbol can be marked protected, or private. In that
22500 case it can't be pre-empted (other definitions in the same link unit
22501 would violate the ODR). */
22502 if (ELF_ST_VISIBILITY (S_GET_OTHER (s
)) > STV_DEFAULT
)
22506 /* Other global symbols might be pre-empted. */
22510 /* Return the size of a relaxable branch instruction. BITS is the
22511 size of the offset field in the narrow instruction. */
22514 relax_branch (fragS
*fragp
, asection
*sec
, int bits
, long stretch
)
22520 /* Assume worst case for symbols not known to be in the same section. */
22521 if (!S_IS_DEFINED (fragp
->fr_symbol
)
22522 || sec
!= S_GET_SEGMENT (fragp
->fr_symbol
)
22523 || S_IS_WEAK (fragp
->fr_symbol
))
22527 /* A branch to a function in ARM state will require interworking. */
22528 if (S_IS_DEFINED (fragp
->fr_symbol
)
22529 && ARM_IS_FUNC (fragp
->fr_symbol
))
22533 if (symbol_preemptible (fragp
->fr_symbol
))
22536 val
= relaxed_symbol_addr (fragp
, stretch
);
22537 addr
= fragp
->fr_address
+ fragp
->fr_fix
+ 4;
22540 /* Offset is a signed value *2 */
22542 if (val
>= limit
|| val
< -limit
)
22548 /* Relax a machine dependent frag. This returns the amount by which
22549 the current size of the frag should change. */
22552 arm_relax_frag (asection
*sec
, fragS
*fragp
, long stretch
)
22557 oldsize
= fragp
->fr_var
;
22558 switch (fragp
->fr_subtype
)
22560 case T_MNEM_ldr_pc2
:
22561 newsize
= relax_adr (fragp
, sec
, stretch
);
22563 case T_MNEM_ldr_pc
:
22564 case T_MNEM_ldr_sp
:
22565 case T_MNEM_str_sp
:
22566 newsize
= relax_immediate (fragp
, 8, 2);
22570 newsize
= relax_immediate (fragp
, 5, 2);
22574 newsize
= relax_immediate (fragp
, 5, 1);
22578 newsize
= relax_immediate (fragp
, 5, 0);
22581 newsize
= relax_adr (fragp
, sec
, stretch
);
22587 newsize
= relax_immediate (fragp
, 8, 0);
22590 newsize
= relax_branch (fragp
, sec
, 11, stretch
);
22593 newsize
= relax_branch (fragp
, sec
, 8, stretch
);
22595 case T_MNEM_add_sp
:
22596 case T_MNEM_add_pc
:
22597 newsize
= relax_immediate (fragp
, 8, 2);
22599 case T_MNEM_inc_sp
:
22600 case T_MNEM_dec_sp
:
22601 newsize
= relax_immediate (fragp
, 7, 2);
22607 newsize
= relax_addsub (fragp
, sec
);
22613 fragp
->fr_var
= newsize
;
22614 /* Freeze wide instructions that are at or before the same location as
22615 in the previous pass. This avoids infinite loops.
22616 Don't freeze them unconditionally because targets may be artificially
22617 misaligned by the expansion of preceding frags. */
22618 if (stretch
<= 0 && newsize
> 2)
22620 md_convert_frag (sec
->owner
, sec
, fragp
);
22624 return newsize
- oldsize
;
22627 /* Round up a section size to the appropriate boundary. */
22630 md_section_align (segT segment ATTRIBUTE_UNUSED
,
22636 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22637 of an rs_align_code fragment. */
22640 arm_handle_align (fragS
* fragP
)
22642 static unsigned char const arm_noop
[2][2][4] =
22645 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
22646 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
22649 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
22650 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
22653 static unsigned char const thumb_noop
[2][2][2] =
22656 {0xc0, 0x46}, /* LE */
22657 {0x46, 0xc0}, /* BE */
22660 {0x00, 0xbf}, /* LE */
22661 {0xbf, 0x00} /* BE */
22664 static unsigned char const wide_thumb_noop
[2][4] =
22665 { /* Wide Thumb-2 */
22666 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
22667 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
22670 unsigned bytes
, fix
, noop_size
;
22672 const unsigned char * noop
;
22673 const unsigned char *narrow_noop
= NULL
;
22678 if (fragP
->fr_type
!= rs_align_code
)
22681 bytes
= fragP
->fr_next
->fr_address
- fragP
->fr_address
- fragP
->fr_fix
;
22682 p
= fragP
->fr_literal
+ fragP
->fr_fix
;
22685 if (bytes
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22686 bytes
&= MAX_MEM_FOR_RS_ALIGN_CODE
;
22688 gas_assert ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) != 0);
22690 if (fragP
->tc_frag_data
.thumb_mode
& (~ MODE_RECORDED
))
22692 if (ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22693 ? selected_cpu
: arm_arch_none
, arm_ext_v6t2
))
22695 narrow_noop
= thumb_noop
[1][target_big_endian
];
22696 noop
= wide_thumb_noop
[target_big_endian
];
22699 noop
= thumb_noop
[0][target_big_endian
];
22707 noop
= arm_noop
[ARM_CPU_HAS_FEATURE (selected_cpu_name
[0]
22708 ? selected_cpu
: arm_arch_none
,
22710 [target_big_endian
];
22717 fragP
->fr_var
= noop_size
;
22719 if (bytes
& (noop_size
- 1))
22721 fix
= bytes
& (noop_size
- 1);
22723 insert_data_mapping_symbol (state
, fragP
->fr_fix
, fragP
, fix
);
22725 memset (p
, 0, fix
);
22732 if (bytes
& noop_size
)
22734 /* Insert a narrow noop. */
22735 memcpy (p
, narrow_noop
, noop_size
);
22737 bytes
-= noop_size
;
22741 /* Use wide noops for the remainder */
22745 while (bytes
>= noop_size
)
22747 memcpy (p
, noop
, noop_size
);
22749 bytes
-= noop_size
;
22753 fragP
->fr_fix
+= fix
;
22756 /* Called from md_do_align. Used to create an alignment
22757 frag in a code section. */
22760 arm_frag_align_code (int n
, int max
)
22764 /* We assume that there will never be a requirement
22765 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22766 if (max
> MAX_MEM_FOR_RS_ALIGN_CODE
)
22771 _("alignments greater than %d bytes not supported in .text sections."),
22772 MAX_MEM_FOR_RS_ALIGN_CODE
+ 1);
22773 as_fatal ("%s", err_msg
);
22776 p
= frag_var (rs_align_code
,
22777 MAX_MEM_FOR_RS_ALIGN_CODE
,
22779 (relax_substateT
) max
,
22786 /* Perform target specific initialisation of a frag.
22787 Note - despite the name this initialisation is not done when the frag
22788 is created, but only when its type is assigned. A frag can be created
22789 and used a long time before its type is set, so beware of assuming that
22790 this initialisation is performed first. */
22794 arm_init_frag (fragS
* fragP
, int max_chars ATTRIBUTE_UNUSED
)
22796 /* Record whether this frag is in an ARM or a THUMB area. */
22797 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22800 #else /* OBJ_ELF is defined. */
22802 arm_init_frag (fragS
* fragP
, int max_chars
)
22804 bfd_boolean frag_thumb_mode
;
22806 /* If the current ARM vs THUMB mode has not already
22807 been recorded into this frag then do so now. */
22808 if ((fragP
->tc_frag_data
.thumb_mode
& MODE_RECORDED
) == 0)
22809 fragP
->tc_frag_data
.thumb_mode
= thumb_mode
| MODE_RECORDED
;
22811 /* PR 21809: Do not set a mapping state for debug sections
22812 - it just confuses other tools. */
22813 if (bfd_get_section_flags (NULL
, now_seg
) & SEC_DEBUGGING
)
22816 frag_thumb_mode
= fragP
->tc_frag_data
.thumb_mode
^ MODE_RECORDED
;
22818 /* Record a mapping symbol for alignment frags. We will delete this
22819 later if the alignment ends up empty. */
22820 switch (fragP
->fr_type
)
22823 case rs_align_test
:
22825 mapping_state_2 (MAP_DATA
, max_chars
);
22827 case rs_align_code
:
22828 mapping_state_2 (frag_thumb_mode
? MAP_THUMB
: MAP_ARM
, max_chars
);
22835 /* When we change sections we need to issue a new mapping symbol. */
22838 arm_elf_change_section (void)
22840 /* Link an unlinked unwind index table section to the .text section. */
22841 if (elf_section_type (now_seg
) == SHT_ARM_EXIDX
22842 && elf_linked_to_section (now_seg
) == NULL
)
22843 elf_linked_to_section (now_seg
) = text_section
;
22847 arm_elf_section_type (const char * str
, size_t len
)
22849 if (len
== 5 && strncmp (str
, "exidx", 5) == 0)
22850 return SHT_ARM_EXIDX
;
22855 /* Code to deal with unwinding tables. */
22857 static void add_unwind_adjustsp (offsetT
);
22859 /* Generate any deferred unwind frame offset. */
22862 flush_pending_unwind (void)
22866 offset
= unwind
.pending_offset
;
22867 unwind
.pending_offset
= 0;
22869 add_unwind_adjustsp (offset
);
22872 /* Add an opcode to this list for this function. Two-byte opcodes should
22873 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22877 add_unwind_opcode (valueT op
, int length
)
22879 /* Add any deferred stack adjustment. */
22880 if (unwind
.pending_offset
)
22881 flush_pending_unwind ();
22883 unwind
.sp_restored
= 0;
22885 if (unwind
.opcode_count
+ length
> unwind
.opcode_alloc
)
22887 unwind
.opcode_alloc
+= ARM_OPCODE_CHUNK_SIZE
;
22888 if (unwind
.opcodes
)
22889 unwind
.opcodes
= XRESIZEVEC (unsigned char, unwind
.opcodes
,
22890 unwind
.opcode_alloc
);
22892 unwind
.opcodes
= XNEWVEC (unsigned char, unwind
.opcode_alloc
);
22897 unwind
.opcodes
[unwind
.opcode_count
] = op
& 0xff;
22899 unwind
.opcode_count
++;
22903 /* Add unwind opcodes to adjust the stack pointer. */
22906 add_unwind_adjustsp (offsetT offset
)
22910 if (offset
> 0x200)
22912 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22917 /* Long form: 0xb2, uleb128. */
22918 /* This might not fit in a word so add the individual bytes,
22919 remembering the list is built in reverse order. */
22920 o
= (valueT
) ((offset
- 0x204) >> 2);
22922 add_unwind_opcode (0, 1);
22924 /* Calculate the uleb128 encoding of the offset. */
22928 bytes
[n
] = o
& 0x7f;
22934 /* Add the insn. */
22936 add_unwind_opcode (bytes
[n
- 1], 1);
22937 add_unwind_opcode (0xb2, 1);
22939 else if (offset
> 0x100)
22941 /* Two short opcodes. */
22942 add_unwind_opcode (0x3f, 1);
22943 op
= (offset
- 0x104) >> 2;
22944 add_unwind_opcode (op
, 1);
22946 else if (offset
> 0)
22948 /* Short opcode. */
22949 op
= (offset
- 4) >> 2;
22950 add_unwind_opcode (op
, 1);
22952 else if (offset
< 0)
22955 while (offset
> 0x100)
22957 add_unwind_opcode (0x7f, 1);
22960 op
= ((offset
- 4) >> 2) | 0x40;
22961 add_unwind_opcode (op
, 1);
22965 /* Finish the list of unwind opcodes for this function. */
22968 finish_unwind_opcodes (void)
22972 if (unwind
.fp_used
)
22974 /* Adjust sp as necessary. */
22975 unwind
.pending_offset
+= unwind
.fp_offset
- unwind
.frame_size
;
22976 flush_pending_unwind ();
22978 /* After restoring sp from the frame pointer. */
22979 op
= 0x90 | unwind
.fp_reg
;
22980 add_unwind_opcode (op
, 1);
22983 flush_pending_unwind ();
22987 /* Start an exception table entry. If idx is nonzero this is an index table
22991 start_unwind_section (const segT text_seg
, int idx
)
22993 const char * text_name
;
22994 const char * prefix
;
22995 const char * prefix_once
;
22996 const char * group_name
;
23004 prefix
= ELF_STRING_ARM_unwind
;
23005 prefix_once
= ELF_STRING_ARM_unwind_once
;
23006 type
= SHT_ARM_EXIDX
;
23010 prefix
= ELF_STRING_ARM_unwind_info
;
23011 prefix_once
= ELF_STRING_ARM_unwind_info_once
;
23012 type
= SHT_PROGBITS
;
23015 text_name
= segment_name (text_seg
);
23016 if (streq (text_name
, ".text"))
23019 if (strncmp (text_name
, ".gnu.linkonce.t.",
23020 strlen (".gnu.linkonce.t.")) == 0)
23022 prefix
= prefix_once
;
23023 text_name
+= strlen (".gnu.linkonce.t.");
23026 sec_name
= concat (prefix
, text_name
, (char *) NULL
);
23032 /* Handle COMDAT group. */
23033 if (prefix
!= prefix_once
&& (text_seg
->flags
& SEC_LINK_ONCE
) != 0)
23035 group_name
= elf_group_name (text_seg
);
23036 if (group_name
== NULL
)
23038 as_bad (_("Group section `%s' has no group signature"),
23039 segment_name (text_seg
));
23040 ignore_rest_of_line ();
23043 flags
|= SHF_GROUP
;
23047 obj_elf_change_section (sec_name
, type
, 0, flags
, 0, group_name
,
23050 /* Set the section link for index tables. */
23052 elf_linked_to_section (now_seg
) = text_seg
;
23056 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
23057 personality routine data. Returns zero, or the index table value for
23058 an inline entry. */
23061 create_unwind_entry (int have_data
)
23066 /* The current word of data. */
23068 /* The number of bytes left in this word. */
23071 finish_unwind_opcodes ();
23073 /* Remember the current text section. */
23074 unwind
.saved_seg
= now_seg
;
23075 unwind
.saved_subseg
= now_subseg
;
23077 start_unwind_section (now_seg
, 0);
23079 if (unwind
.personality_routine
== NULL
)
23081 if (unwind
.personality_index
== -2)
23084 as_bad (_("handlerdata in cantunwind frame"));
23085 return 1; /* EXIDX_CANTUNWIND. */
23088 /* Use a default personality routine if none is specified. */
23089 if (unwind
.personality_index
== -1)
23091 if (unwind
.opcode_count
> 3)
23092 unwind
.personality_index
= 1;
23094 unwind
.personality_index
= 0;
23097 /* Space for the personality routine entry. */
23098 if (unwind
.personality_index
== 0)
23100 if (unwind
.opcode_count
> 3)
23101 as_bad (_("too many unwind opcodes for personality routine 0"));
23105 /* All the data is inline in the index table. */
23108 while (unwind
.opcode_count
> 0)
23110 unwind
.opcode_count
--;
23111 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23115 /* Pad with "finish" opcodes. */
23117 data
= (data
<< 8) | 0xb0;
23124 /* We get two opcodes "free" in the first word. */
23125 size
= unwind
.opcode_count
- 2;
23129 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
23130 if (unwind
.personality_index
!= -1)
23132 as_bad (_("attempt to recreate an unwind entry"));
23136 /* An extra byte is required for the opcode count. */
23137 size
= unwind
.opcode_count
+ 1;
23140 size
= (size
+ 3) >> 2;
23142 as_bad (_("too many unwind opcodes"));
23144 frag_align (2, 0, 0);
23145 record_alignment (now_seg
, 2);
23146 unwind
.table_entry
= expr_build_dot ();
23148 /* Allocate the table entry. */
23149 ptr
= frag_more ((size
<< 2) + 4);
23150 /* PR 13449: Zero the table entries in case some of them are not used. */
23151 memset (ptr
, 0, (size
<< 2) + 4);
23152 where
= frag_now_fix () - ((size
<< 2) + 4);
23154 switch (unwind
.personality_index
)
23157 /* ??? Should this be a PLT generating relocation? */
23158 /* Custom personality routine. */
23159 fix_new (frag_now
, where
, 4, unwind
.personality_routine
, 0, 1,
23160 BFD_RELOC_ARM_PREL31
);
23165 /* Set the first byte to the number of additional words. */
23166 data
= size
> 0 ? size
- 1 : 0;
23170 /* ABI defined personality routines. */
23172 /* Three opcodes bytes are packed into the first word. */
23179 /* The size and first two opcode bytes go in the first word. */
23180 data
= ((0x80 + unwind
.personality_index
) << 8) | size
;
23185 /* Should never happen. */
23189 /* Pack the opcodes into words (MSB first), reversing the list at the same
23191 while (unwind
.opcode_count
> 0)
23195 md_number_to_chars (ptr
, data
, 4);
23200 unwind
.opcode_count
--;
23202 data
= (data
<< 8) | unwind
.opcodes
[unwind
.opcode_count
];
23205 /* Finish off the last word. */
23208 /* Pad with "finish" opcodes. */
23210 data
= (data
<< 8) | 0xb0;
23212 md_number_to_chars (ptr
, data
, 4);
23217 /* Add an empty descriptor if there is no user-specified data. */
23218 ptr
= frag_more (4);
23219 md_number_to_chars (ptr
, 0, 4);
23226 /* Initialize the DWARF-2 unwind information for this procedure. */
23229 tc_arm_frame_initial_instructions (void)
23231 cfi_add_CFA_def_cfa (REG_SP
, 0);
23233 #endif /* OBJ_ELF */
23235 /* Convert REGNAME to a DWARF-2 register number. */
23238 tc_arm_regname_to_dw2regnum (char *regname
)
23240 int reg
= arm_reg_parse (®name
, REG_TYPE_RN
);
23244 /* PR 16694: Allow VFP registers as well. */
23245 reg
= arm_reg_parse (®name
, REG_TYPE_VFS
);
23249 reg
= arm_reg_parse (®name
, REG_TYPE_VFD
);
23258 tc_pe_dwarf2_emit_offset (symbolS
*symbol
, unsigned int size
)
23262 exp
.X_op
= O_secrel
;
23263 exp
.X_add_symbol
= symbol
;
23264 exp
.X_add_number
= 0;
23265 emit_expr (&exp
, size
);
23269 /* MD interface: Symbol and relocation handling. */
23271 /* Return the address within the segment that a PC-relative fixup is
23272 relative to. For ARM, PC-relative fixups applied to instructions
23273 are generally relative to the location of the fixup plus 8 bytes.
23274 Thumb branches are offset by 4, and Thumb loads relative to PC
23275 require special handling. */
23278 md_pcrel_from_section (fixS
* fixP
, segT seg
)
23280 offsetT base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23282 /* If this is pc-relative and we are going to emit a relocation
23283 then we just want to put out any pipeline compensation that the linker
23284 will need. Otherwise we want to use the calculated base.
23285 For WinCE we skip the bias for externals as well, since this
23286 is how the MS ARM-CE assembler behaves and we want to be compatible. */
23288 && ((fixP
->fx_addsy
&& S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23289 || (arm_force_relocation (fixP
)
23291 && !S_IS_EXTERNAL (fixP
->fx_addsy
)
23297 switch (fixP
->fx_r_type
)
23299 /* PC relative addressing on the Thumb is slightly odd as the
23300 bottom two bits of the PC are forced to zero for the
23301 calculation. This happens *after* application of the
23302 pipeline offset. However, Thumb adrl already adjusts for
23303 this, so we need not do it again. */
23304 case BFD_RELOC_ARM_THUMB_ADD
:
23307 case BFD_RELOC_ARM_THUMB_OFFSET
:
23308 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
23309 case BFD_RELOC_ARM_T32_ADD_PC12
:
23310 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
23311 return (base
+ 4) & ~3;
23313 /* Thumb branches are simply offset by +4. */
23314 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
23315 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
23316 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
23317 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
23318 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
23319 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
23320 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
23321 case BFD_RELOC_ARM_THUMB_BF17
:
23322 case BFD_RELOC_ARM_THUMB_BF19
:
23323 case BFD_RELOC_ARM_THUMB_BF13
:
23324 case BFD_RELOC_ARM_THUMB_LOOP12
:
23327 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
23329 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23330 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23331 && ARM_IS_FUNC (fixP
->fx_addsy
)
23332 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23333 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23336 /* BLX is like branches above, but forces the low two bits of PC to
23338 case BFD_RELOC_THUMB_PCREL_BLX
:
23340 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23341 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23342 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23343 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23344 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23345 return (base
+ 4) & ~3;
23347 /* ARM mode branches are offset by +8. However, the Windows CE
23348 loader expects the relocation not to take this into account. */
23349 case BFD_RELOC_ARM_PCREL_BLX
:
23351 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23352 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23353 && ARM_IS_FUNC (fixP
->fx_addsy
)
23354 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23355 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23358 case BFD_RELOC_ARM_PCREL_CALL
:
23360 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23361 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
23362 && THUMB_IS_FUNC (fixP
->fx_addsy
)
23363 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
23364 base
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
23367 case BFD_RELOC_ARM_PCREL_BRANCH
:
23368 case BFD_RELOC_ARM_PCREL_JUMP
:
23369 case BFD_RELOC_ARM_PLT32
:
23371 /* When handling fixups immediately, because we have already
23372 discovered the value of a symbol, or the address of the frag involved
23373 we must account for the offset by +8, as the OS loader will never see the reloc.
23374 see fixup_segment() in write.c
23375 The S_IS_EXTERNAL test handles the case of global symbols.
23376 Those need the calculated base, not just the pipe compensation the linker will need. */
23378 && fixP
->fx_addsy
!= NULL
23379 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
23380 && (S_IS_EXTERNAL (fixP
->fx_addsy
) || !arm_force_relocation (fixP
)))
23388 /* ARM mode loads relative to PC are also offset by +8. Unlike
23389 branches, the Windows CE loader *does* expect the relocation
23390 to take this into account. */
23391 case BFD_RELOC_ARM_OFFSET_IMM
:
23392 case BFD_RELOC_ARM_OFFSET_IMM8
:
23393 case BFD_RELOC_ARM_HWLITERAL
:
23394 case BFD_RELOC_ARM_LITERAL
:
23395 case BFD_RELOC_ARM_CP_OFF_IMM
:
23399 /* Other PC-relative relocations are un-offset. */
23405 static bfd_boolean flag_warn_syms
= TRUE
;
23408 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED
, char * name
)
23410 /* PR 18347 - Warn if the user attempts to create a symbol with the same
23411 name as an ARM instruction. Whilst strictly speaking it is allowed, it
23412 does mean that the resulting code might be very confusing to the reader.
23413 Also this warning can be triggered if the user omits an operand before
23414 an immediate address, eg:
23418 GAS treats this as an assignment of the value of the symbol foo to a
23419 symbol LDR, and so (without this code) it will not issue any kind of
23420 warning or error message.
23422 Note - ARM instructions are case-insensitive but the strings in the hash
23423 table are all stored in lower case, so we must first ensure that name is
23425 if (flag_warn_syms
&& arm_ops_hsh
)
23427 char * nbuf
= strdup (name
);
23430 for (p
= nbuf
; *p
; p
++)
23432 if (hash_find (arm_ops_hsh
, nbuf
) != NULL
)
23434 static struct hash_control
* already_warned
= NULL
;
23436 if (already_warned
== NULL
)
23437 already_warned
= hash_new ();
23438 /* Only warn about the symbol once. To keep the code
23439 simple we let hash_insert do the lookup for us. */
23440 if (hash_insert (already_warned
, nbuf
, NULL
) == NULL
)
23441 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name
);
23450 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
23451 Otherwise we have no need to default values of symbols. */
23454 md_undefined_symbol (char * name ATTRIBUTE_UNUSED
)
23457 if (name
[0] == '_' && name
[1] == 'G'
23458 && streq (name
, GLOBAL_OFFSET_TABLE_NAME
))
23462 if (symbol_find (name
))
23463 as_bad (_("GOT already in the symbol table"));
23465 GOT_symbol
= symbol_new (name
, undefined_section
,
23466 (valueT
) 0, & zero_address_frag
);
23476 /* Subroutine of md_apply_fix. Check to see if an immediate can be
23477 computed as two separate immediate values, added together. We
23478 already know that this value cannot be computed by just one ARM
23481 static unsigned int
23482 validate_immediate_twopart (unsigned int val
,
23483 unsigned int * highpart
)
23488 for (i
= 0; i
< 32; i
+= 2)
23489 if (((a
= rotate_left (val
, i
)) & 0xff) != 0)
23495 * highpart
= (a
>> 8) | ((i
+ 24) << 7);
23497 else if (a
& 0xff0000)
23499 if (a
& 0xff000000)
23501 * highpart
= (a
>> 16) | ((i
+ 16) << 7);
23505 gas_assert (a
& 0xff000000);
23506 * highpart
= (a
>> 24) | ((i
+ 8) << 7);
23509 return (a
& 0xff) | (i
<< 7);
23516 validate_offset_imm (unsigned int val
, int hwse
)
23518 if ((hwse
&& val
> 255) || val
> 4095)
23523 /* Subroutine of md_apply_fix. Do those data_ops which can take a
23524 negative immediate constant by altering the instruction. A bit of
23529 by inverting the second operand, and
23532 by negating the second operand. */
23535 negate_data_op (unsigned long * instruction
,
23536 unsigned long value
)
23539 unsigned long negated
, inverted
;
23541 negated
= encode_arm_immediate (-value
);
23542 inverted
= encode_arm_immediate (~value
);
23544 op
= (*instruction
>> DATA_OP_SHIFT
) & 0xf;
23547 /* First negates. */
23548 case OPCODE_SUB
: /* ADD <-> SUB */
23549 new_inst
= OPCODE_ADD
;
23554 new_inst
= OPCODE_SUB
;
23558 case OPCODE_CMP
: /* CMP <-> CMN */
23559 new_inst
= OPCODE_CMN
;
23564 new_inst
= OPCODE_CMP
;
23568 /* Now Inverted ops. */
23569 case OPCODE_MOV
: /* MOV <-> MVN */
23570 new_inst
= OPCODE_MVN
;
23575 new_inst
= OPCODE_MOV
;
23579 case OPCODE_AND
: /* AND <-> BIC */
23580 new_inst
= OPCODE_BIC
;
23585 new_inst
= OPCODE_AND
;
23589 case OPCODE_ADC
: /* ADC <-> SBC */
23590 new_inst
= OPCODE_SBC
;
23595 new_inst
= OPCODE_ADC
;
23599 /* We cannot do anything. */
23604 if (value
== (unsigned) FAIL
)
23607 *instruction
&= OPCODE_MASK
;
23608 *instruction
|= new_inst
<< DATA_OP_SHIFT
;
23612 /* Like negate_data_op, but for Thumb-2. */
23614 static unsigned int
23615 thumb32_negate_data_op (offsetT
*instruction
, unsigned int value
)
23619 unsigned int negated
, inverted
;
23621 negated
= encode_thumb32_immediate (-value
);
23622 inverted
= encode_thumb32_immediate (~value
);
23624 rd
= (*instruction
>> 8) & 0xf;
23625 op
= (*instruction
>> T2_DATA_OP_SHIFT
) & 0xf;
23628 /* ADD <-> SUB. Includes CMP <-> CMN. */
23629 case T2_OPCODE_SUB
:
23630 new_inst
= T2_OPCODE_ADD
;
23634 case T2_OPCODE_ADD
:
23635 new_inst
= T2_OPCODE_SUB
;
23639 /* ORR <-> ORN. Includes MOV <-> MVN. */
23640 case T2_OPCODE_ORR
:
23641 new_inst
= T2_OPCODE_ORN
;
23645 case T2_OPCODE_ORN
:
23646 new_inst
= T2_OPCODE_ORR
;
23650 /* AND <-> BIC. TST has no inverted equivalent. */
23651 case T2_OPCODE_AND
:
23652 new_inst
= T2_OPCODE_BIC
;
23659 case T2_OPCODE_BIC
:
23660 new_inst
= T2_OPCODE_AND
;
23665 case T2_OPCODE_ADC
:
23666 new_inst
= T2_OPCODE_SBC
;
23670 case T2_OPCODE_SBC
:
23671 new_inst
= T2_OPCODE_ADC
;
23675 /* We cannot do anything. */
23680 if (value
== (unsigned int)FAIL
)
23683 *instruction
&= T2_OPCODE_MASK
;
23684 *instruction
|= new_inst
<< T2_DATA_OP_SHIFT
;
23688 /* Read a 32-bit thumb instruction from buf. */
23690 static unsigned long
23691 get_thumb32_insn (char * buf
)
23693 unsigned long insn
;
23694 insn
= md_chars_to_number (buf
, THUMB_SIZE
) << 16;
23695 insn
|= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23700 /* We usually want to set the low bit on the address of thumb function
23701 symbols. In particular .word foo - . should have the low bit set.
23702 Generic code tries to fold the difference of two symbols to
23703 a constant. Prevent this and force a relocation when the first symbols
23704 is a thumb function. */
23707 arm_optimize_expr (expressionS
*l
, operatorT op
, expressionS
*r
)
23709 if (op
== O_subtract
23710 && l
->X_op
== O_symbol
23711 && r
->X_op
== O_symbol
23712 && THUMB_IS_FUNC (l
->X_add_symbol
))
23714 l
->X_op
= O_subtract
;
23715 l
->X_op_symbol
= r
->X_add_symbol
;
23716 l
->X_add_number
-= r
->X_add_number
;
23720 /* Process as normal. */
23724 /* Encode Thumb2 unconditional branches and calls. The encoding
23725 for the 2 are identical for the immediate values. */
23728 encode_thumb2_b_bl_offset (char * buf
, offsetT value
)
23730 #define T2I1I2MASK ((1 << 13) | (1 << 11))
23733 addressT S
, I1
, I2
, lo
, hi
;
23735 S
= (value
>> 24) & 0x01;
23736 I1
= (value
>> 23) & 0x01;
23737 I2
= (value
>> 22) & 0x01;
23738 hi
= (value
>> 12) & 0x3ff;
23739 lo
= (value
>> 1) & 0x7ff;
23740 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
23741 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
23742 newval
|= (S
<< 10) | hi
;
23743 newval2
&= ~T2I1I2MASK
;
23744 newval2
|= (((I1
^ S
) << 13) | ((I2
^ S
) << 11) | lo
) ^ T2I1I2MASK
;
23745 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
23746 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
23750 md_apply_fix (fixS
* fixP
,
23754 offsetT value
= * valP
;
23756 unsigned int newimm
;
23757 unsigned long temp
;
23759 char * buf
= fixP
->fx_where
+ fixP
->fx_frag
->fr_literal
;
23761 gas_assert (fixP
->fx_r_type
<= BFD_RELOC_UNUSED
);
23763 /* Note whether this will delete the relocation. */
23765 if (fixP
->fx_addsy
== 0 && !fixP
->fx_pcrel
)
23768 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23769 consistency with the behaviour on 32-bit hosts. Remember value
23771 value
&= 0xffffffff;
23772 value
^= 0x80000000;
23773 value
-= 0x80000000;
23776 fixP
->fx_addnumber
= value
;
23778 /* Same treatment for fixP->fx_offset. */
23779 fixP
->fx_offset
&= 0xffffffff;
23780 fixP
->fx_offset
^= 0x80000000;
23781 fixP
->fx_offset
-= 0x80000000;
23783 switch (fixP
->fx_r_type
)
23785 case BFD_RELOC_NONE
:
23786 /* This will need to go in the object file. */
23790 case BFD_RELOC_ARM_IMMEDIATE
:
23791 /* We claim that this fixup has been processed here,
23792 even if in fact we generate an error because we do
23793 not have a reloc for it, so tc_gen_reloc will reject it. */
23796 if (fixP
->fx_addsy
)
23798 const char *msg
= 0;
23800 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23801 msg
= _("undefined symbol %s used as an immediate value");
23802 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23803 msg
= _("symbol %s is in a different section");
23804 else if (S_IS_WEAK (fixP
->fx_addsy
))
23805 msg
= _("symbol %s is weak and may be overridden later");
23809 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23810 msg
, S_GET_NAME (fixP
->fx_addsy
));
23815 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23817 /* If the offset is negative, we should use encoding A2 for ADR. */
23818 if ((temp
& 0xfff0000) == 0x28f0000 && value
< 0)
23819 newimm
= negate_data_op (&temp
, value
);
23822 newimm
= encode_arm_immediate (value
);
23824 /* If the instruction will fail, see if we can fix things up by
23825 changing the opcode. */
23826 if (newimm
== (unsigned int) FAIL
)
23827 newimm
= negate_data_op (&temp
, value
);
23828 /* MOV accepts both ARM modified immediate (A1 encoding) and
23829 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23830 When disassembling, MOV is preferred when there is no encoding
23832 if (newimm
== (unsigned int) FAIL
23833 && ((temp
>> DATA_OP_SHIFT
) & 0xf) == OPCODE_MOV
23834 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)
23835 && !((temp
>> SBIT_SHIFT
) & 0x1)
23836 && value
>= 0 && value
<= 0xffff)
23838 /* Clear bits[23:20] to change encoding from A1 to A2. */
23839 temp
&= 0xff0fffff;
23840 /* Encoding high 4bits imm. Code below will encode the remaining
23842 temp
|= (value
& 0x0000f000) << 4;
23843 newimm
= value
& 0x00000fff;
23847 if (newimm
== (unsigned int) FAIL
)
23849 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23850 _("invalid constant (%lx) after fixup"),
23851 (unsigned long) value
);
23855 newimm
|= (temp
& 0xfffff000);
23856 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23859 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
23861 unsigned int highpart
= 0;
23862 unsigned int newinsn
= 0xe1a00000; /* nop. */
23864 if (fixP
->fx_addsy
)
23866 const char *msg
= 0;
23868 if (! S_IS_DEFINED (fixP
->fx_addsy
))
23869 msg
= _("undefined symbol %s used as an immediate value");
23870 else if (S_GET_SEGMENT (fixP
->fx_addsy
) != seg
)
23871 msg
= _("symbol %s is in a different section");
23872 else if (S_IS_WEAK (fixP
->fx_addsy
))
23873 msg
= _("symbol %s is weak and may be overridden later");
23877 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23878 msg
, S_GET_NAME (fixP
->fx_addsy
));
23883 newimm
= encode_arm_immediate (value
);
23884 temp
= md_chars_to_number (buf
, INSN_SIZE
);
23886 /* If the instruction will fail, see if we can fix things up by
23887 changing the opcode. */
23888 if (newimm
== (unsigned int) FAIL
23889 && (newimm
= negate_data_op (& temp
, value
)) == (unsigned int) FAIL
)
23891 /* No ? OK - try using two ADD instructions to generate
23893 newimm
= validate_immediate_twopart (value
, & highpart
);
23895 /* Yes - then make sure that the second instruction is
23897 if (newimm
!= (unsigned int) FAIL
)
23899 /* Still No ? Try using a negated value. */
23900 else if ((newimm
= validate_immediate_twopart (- value
, & highpart
)) != (unsigned int) FAIL
)
23901 temp
= newinsn
= (temp
& OPCODE_MASK
) | OPCODE_SUB
<< DATA_OP_SHIFT
;
23902 /* Otherwise - give up. */
23905 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23906 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23911 /* Replace the first operand in the 2nd instruction (which
23912 is the PC) with the destination register. We have
23913 already added in the PC in the first instruction and we
23914 do not want to do it again. */
23915 newinsn
&= ~ 0xf0000;
23916 newinsn
|= ((newinsn
& 0x0f000) << 4);
23919 newimm
|= (temp
& 0xfffff000);
23920 md_number_to_chars (buf
, (valueT
) newimm
, INSN_SIZE
);
23922 highpart
|= (newinsn
& 0xfffff000);
23923 md_number_to_chars (buf
+ INSN_SIZE
, (valueT
) highpart
, INSN_SIZE
);
23927 case BFD_RELOC_ARM_OFFSET_IMM
:
23928 if (!fixP
->fx_done
&& seg
->use_rela_p
)
23930 /* Fall through. */
23932 case BFD_RELOC_ARM_LITERAL
:
23938 if (validate_offset_imm (value
, 0) == FAIL
)
23940 if (fixP
->fx_r_type
== BFD_RELOC_ARM_LITERAL
)
23941 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23942 _("invalid literal constant: pool needs to be closer"));
23944 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23945 _("bad immediate value for offset (%ld)"),
23950 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23952 newval
&= 0xfffff000;
23955 newval
&= 0xff7ff000;
23956 newval
|= value
| (sign
? INDEX_UP
: 0);
23958 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23961 case BFD_RELOC_ARM_OFFSET_IMM8
:
23962 case BFD_RELOC_ARM_HWLITERAL
:
23968 if (validate_offset_imm (value
, 1) == FAIL
)
23970 if (fixP
->fx_r_type
== BFD_RELOC_ARM_HWLITERAL
)
23971 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23972 _("invalid literal constant: pool needs to be closer"));
23974 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23975 _("bad immediate value for 8-bit offset (%ld)"),
23980 newval
= md_chars_to_number (buf
, INSN_SIZE
);
23982 newval
&= 0xfffff0f0;
23985 newval
&= 0xff7ff0f0;
23986 newval
|= ((value
>> 4) << 8) | (value
& 0xf) | (sign
? INDEX_UP
: 0);
23988 md_number_to_chars (buf
, newval
, INSN_SIZE
);
23991 case BFD_RELOC_ARM_T32_OFFSET_U8
:
23992 if (value
< 0 || value
> 1020 || value
% 4 != 0)
23993 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
23994 _("bad immediate value for offset (%ld)"), (long) value
);
23997 newval
= md_chars_to_number (buf
+2, THUMB_SIZE
);
23999 md_number_to_chars (buf
+2, newval
, THUMB_SIZE
);
24002 case BFD_RELOC_ARM_T32_OFFSET_IMM
:
24003 /* This is a complicated relocation used for all varieties of Thumb32
24004 load/store instruction with immediate offset:
24006 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
24007 *4, optional writeback(W)
24008 (doubleword load/store)
24010 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
24011 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
24012 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
24013 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
24014 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
24016 Uppercase letters indicate bits that are already encoded at
24017 this point. Lowercase letters are our problem. For the
24018 second block of instructions, the secondary opcode nybble
24019 (bits 8..11) is present, and bit 23 is zero, even if this is
24020 a PC-relative operation. */
24021 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24023 newval
|= md_chars_to_number (buf
+THUMB_SIZE
, THUMB_SIZE
);
24025 if ((newval
& 0xf0000000) == 0xe0000000)
24027 /* Doubleword load/store: 8-bit offset, scaled by 4. */
24029 newval
|= (1 << 23);
24032 if (value
% 4 != 0)
24034 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24035 _("offset not a multiple of 4"));
24041 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24042 _("offset out of range"));
24047 else if ((newval
& 0x000f0000) == 0x000f0000)
24049 /* PC-relative, 12-bit offset. */
24051 newval
|= (1 << 23);
24056 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24057 _("offset out of range"));
24062 else if ((newval
& 0x00000100) == 0x00000100)
24064 /* Writeback: 8-bit, +/- offset. */
24066 newval
|= (1 << 9);
24071 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24072 _("offset out of range"));
24077 else if ((newval
& 0x00000f00) == 0x00000e00)
24079 /* T-instruction: positive 8-bit offset. */
24080 if (value
< 0 || value
> 0xff)
24082 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24083 _("offset out of range"));
24091 /* Positive 12-bit or negative 8-bit offset. */
24095 newval
|= (1 << 23);
24105 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24106 _("offset out of range"));
24113 md_number_to_chars (buf
, (newval
>> 16) & 0xffff, THUMB_SIZE
);
24114 md_number_to_chars (buf
+ THUMB_SIZE
, newval
& 0xffff, THUMB_SIZE
);
24117 case BFD_RELOC_ARM_SHIFT_IMM
:
24118 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24119 if (((unsigned long) value
) > 32
24121 && (((newval
& 0x60) == 0) || (newval
& 0x60) == 0x60)))
24123 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24124 _("shift expression is too large"));
24129 /* Shifts of zero must be done as lsl. */
24131 else if (value
== 32)
24133 newval
&= 0xfffff07f;
24134 newval
|= (value
& 0x1f) << 7;
24135 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24138 case BFD_RELOC_ARM_T32_IMMEDIATE
:
24139 case BFD_RELOC_ARM_T32_ADD_IMM
:
24140 case BFD_RELOC_ARM_T32_IMM12
:
24141 case BFD_RELOC_ARM_T32_ADD_PC12
:
24142 /* We claim that this fixup has been processed here,
24143 even if in fact we generate an error because we do
24144 not have a reloc for it, so tc_gen_reloc will reject it. */
24148 && ! S_IS_DEFINED (fixP
->fx_addsy
))
24150 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24151 _("undefined symbol %s used as an immediate value"),
24152 S_GET_NAME (fixP
->fx_addsy
));
24156 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24158 newval
|= md_chars_to_number (buf
+2, THUMB_SIZE
);
24161 if ((fixP
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
24162 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
24163 Thumb2 modified immediate encoding (T2). */
24164 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
))
24165 || fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24167 newimm
= encode_thumb32_immediate (value
);
24168 if (newimm
== (unsigned int) FAIL
)
24169 newimm
= thumb32_negate_data_op (&newval
, value
);
24171 if (newimm
== (unsigned int) FAIL
)
24173 if (fixP
->fx_r_type
!= BFD_RELOC_ARM_T32_IMMEDIATE
)
24175 /* Turn add/sum into addw/subw. */
24176 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
)
24177 newval
= (newval
& 0xfeffffff) | 0x02000000;
24178 /* No flat 12-bit imm encoding for addsw/subsw. */
24179 if ((newval
& 0x00100000) == 0)
24181 /* 12 bit immediate for addw/subw. */
24185 newval
^= 0x00a00000;
24188 newimm
= (unsigned int) FAIL
;
24195 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
24196 UINT16 (T3 encoding), MOVW only accepts UINT16. When
24197 disassembling, MOV is preferred when there is no encoding
24199 if (((newval
>> T2_DATA_OP_SHIFT
) & 0xf) == T2_OPCODE_ORR
24200 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
24201 but with the Rn field [19:16] set to 1111. */
24202 && (((newval
>> 16) & 0xf) == 0xf)
24203 && ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2_v8m
)
24204 && !((newval
>> T2_SBIT_SHIFT
) & 0x1)
24205 && value
>= 0 && value
<= 0xffff)
24207 /* Toggle bit[25] to change encoding from T2 to T3. */
24209 /* Clear bits[19:16]. */
24210 newval
&= 0xfff0ffff;
24211 /* Encoding high 4bits imm. Code below will encode the
24212 remaining low 12bits. */
24213 newval
|= (value
& 0x0000f000) << 4;
24214 newimm
= value
& 0x00000fff;
24219 if (newimm
== (unsigned int)FAIL
)
24221 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24222 _("invalid constant (%lx) after fixup"),
24223 (unsigned long) value
);
24227 newval
|= (newimm
& 0x800) << 15;
24228 newval
|= (newimm
& 0x700) << 4;
24229 newval
|= (newimm
& 0x0ff);
24231 md_number_to_chars (buf
, (valueT
) ((newval
>> 16) & 0xffff), THUMB_SIZE
);
24232 md_number_to_chars (buf
+2, (valueT
) (newval
& 0xffff), THUMB_SIZE
);
24235 case BFD_RELOC_ARM_SMC
:
24236 if (((unsigned long) value
) > 0xffff)
24237 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24238 _("invalid smc expression"));
24239 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24240 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24241 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24244 case BFD_RELOC_ARM_HVC
:
24245 if (((unsigned long) value
) > 0xffff)
24246 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24247 _("invalid hvc expression"));
24248 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24249 newval
|= (value
& 0xf) | ((value
& 0xfff0) << 4);
24250 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24253 case BFD_RELOC_ARM_SWI
:
24254 if (fixP
->tc_fix_data
!= 0)
24256 if (((unsigned long) value
) > 0xff)
24257 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24258 _("invalid swi expression"));
24259 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24261 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24265 if (((unsigned long) value
) > 0x00ffffff)
24266 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24267 _("invalid swi expression"));
24268 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24270 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24274 case BFD_RELOC_ARM_MULTI
:
24275 if (((unsigned long) value
) > 0xffff)
24276 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24277 _("invalid expression in load/store multiple"));
24278 newval
= value
| md_chars_to_number (buf
, INSN_SIZE
);
24279 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24283 case BFD_RELOC_ARM_PCREL_CALL
:
24285 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24287 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24288 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24289 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24290 /* Flip the bl to blx. This is a simple flip
24291 bit here because we generate PCREL_CALL for
24292 unconditional bls. */
24294 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24295 newval
= newval
| 0x10000000;
24296 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24302 goto arm_branch_common
;
24304 case BFD_RELOC_ARM_PCREL_JUMP
:
24305 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24307 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24308 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24309 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24311 /* This would map to a bl<cond>, b<cond>,
24312 b<always> to a Thumb function. We
24313 need to force a relocation for this particular
24315 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24318 /* Fall through. */
24320 case BFD_RELOC_ARM_PLT32
:
24322 case BFD_RELOC_ARM_PCREL_BRANCH
:
24324 goto arm_branch_common
;
24326 case BFD_RELOC_ARM_PCREL_BLX
:
24329 if (ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
24331 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24332 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24333 && ARM_IS_FUNC (fixP
->fx_addsy
))
24335 /* Flip the blx to a bl and warn. */
24336 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24337 newval
= 0xeb000000;
24338 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24339 _("blx to '%s' an ARM ISA state function changed to bl"),
24341 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24347 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
24348 fixP
->fx_r_type
= BFD_RELOC_ARM_PCREL_CALL
;
24352 /* We are going to store value (shifted right by two) in the
24353 instruction, in a 24 bit, signed field. Bits 26 through 32 either
24354 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
24357 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24358 _("misaligned branch destination"));
24359 if ((value
& (offsetT
)0xfe000000) != (offsetT
)0
24360 && (value
& (offsetT
)0xfe000000) != (offsetT
)0xfe000000)
24361 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24363 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24365 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24366 newval
|= (value
>> 2) & 0x00ffffff;
24367 /* Set the H bit on BLX instructions. */
24371 newval
|= 0x01000000;
24373 newval
&= ~0x01000000;
24375 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24379 case BFD_RELOC_THUMB_PCREL_BRANCH7
: /* CBZ */
24380 /* CBZ can only branch forward. */
24382 /* Attempts to use CBZ to branch to the next instruction
24383 (which, strictly speaking, are prohibited) will be turned into
24386 FIXME: It may be better to remove the instruction completely and
24387 perform relaxation. */
24390 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24391 newval
= 0xbf00; /* NOP encoding T1 */
24392 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24397 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24399 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24401 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24402 newval
|= ((value
& 0x3e) << 2) | ((value
& 0x40) << 3);
24403 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24408 case BFD_RELOC_THUMB_PCREL_BRANCH9
: /* Conditional branch. */
24409 if ((value
& ~0xff) && ((value
& ~0xff) != ~0xff))
24410 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24412 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24414 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24415 newval
|= (value
& 0x1ff) >> 1;
24416 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24420 case BFD_RELOC_THUMB_PCREL_BRANCH12
: /* Unconditional branch. */
24421 if ((value
& ~0x7ff) && ((value
& ~0x7ff) != ~0x7ff))
24422 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24424 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24426 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24427 newval
|= (value
& 0xfff) >> 1;
24428 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24432 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
24434 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24435 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24436 && ARM_IS_FUNC (fixP
->fx_addsy
)
24437 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24439 /* Force a relocation for a branch 20 bits wide. */
24442 if ((value
& ~0x1fffff) && ((value
& ~0x0fffff) != ~0x0fffff))
24443 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24444 _("conditional branch out of range"));
24446 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24449 addressT S
, J1
, J2
, lo
, hi
;
24451 S
= (value
& 0x00100000) >> 20;
24452 J2
= (value
& 0x00080000) >> 19;
24453 J1
= (value
& 0x00040000) >> 18;
24454 hi
= (value
& 0x0003f000) >> 12;
24455 lo
= (value
& 0x00000ffe) >> 1;
24457 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24458 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24459 newval
|= (S
<< 10) | hi
;
24460 newval2
|= (J1
<< 13) | (J2
<< 11) | lo
;
24461 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24462 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
24466 case BFD_RELOC_THUMB_PCREL_BLX
:
24467 /* If there is a blx from a thumb state function to
24468 another thumb function flip this to a bl and warn
24472 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24473 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24474 && THUMB_IS_FUNC (fixP
->fx_addsy
))
24476 const char *name
= S_GET_NAME (fixP
->fx_addsy
);
24477 as_warn_where (fixP
->fx_file
, fixP
->fx_line
,
24478 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
24480 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24481 newval
= newval
| 0x1000;
24482 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24483 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24488 goto thumb_bl_common
;
24490 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
24491 /* A bl from Thumb state ISA to an internal ARM state function
24492 is converted to a blx. */
24494 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
24495 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
24496 && ARM_IS_FUNC (fixP
->fx_addsy
)
24497 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
))
24499 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
24500 newval
= newval
& ~0x1000;
24501 md_number_to_chars (buf
+THUMB_SIZE
, newval
, THUMB_SIZE
);
24502 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BLX
;
24508 if (fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24509 /* For a BLX instruction, make sure that the relocation is rounded up
24510 to a word boundary. This follows the semantics of the instruction
24511 which specifies that bit 1 of the target address will come from bit
24512 1 of the base address. */
24513 value
= (value
+ 3) & ~ 3;
24516 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
24517 && fixP
->fx_r_type
== BFD_RELOC_THUMB_PCREL_BLX
)
24518 fixP
->fx_r_type
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
24521 if ((value
& ~0x3fffff) && ((value
& ~0x3fffff) != ~0x3fffff))
24523 if (!(ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v6t2
)))
24524 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24525 else if ((value
& ~0x1ffffff)
24526 && ((value
& ~0x1ffffff) != ~0x1ffffff))
24527 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24528 _("Thumb2 branch out of range"));
24531 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24532 encode_thumb2_b_bl_offset (buf
, value
);
24536 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
24537 if ((value
& ~0x0ffffff) && ((value
& ~0x0ffffff) != ~0x0ffffff))
24538 as_bad_where (fixP
->fx_file
, fixP
->fx_line
, BAD_RANGE
);
24540 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24541 encode_thumb2_b_bl_offset (buf
, value
);
24546 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24551 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24552 md_number_to_chars (buf
, value
, 2);
24556 case BFD_RELOC_ARM_TLS_CALL
:
24557 case BFD_RELOC_ARM_THM_TLS_CALL
:
24558 case BFD_RELOC_ARM_TLS_DESCSEQ
:
24559 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
24560 case BFD_RELOC_ARM_TLS_GOTDESC
:
24561 case BFD_RELOC_ARM_TLS_GD32
:
24562 case BFD_RELOC_ARM_TLS_LE32
:
24563 case BFD_RELOC_ARM_TLS_IE32
:
24564 case BFD_RELOC_ARM_TLS_LDM32
:
24565 case BFD_RELOC_ARM_TLS_LDO32
:
24566 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24569 /* Same handling as above, but with the arm_fdpic guard. */
24570 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
24571 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
24572 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
24575 S_SET_THREAD_LOCAL (fixP
->fx_addsy
);
24579 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24580 _("Relocation supported only in FDPIC mode"));
24584 case BFD_RELOC_ARM_GOT32
:
24585 case BFD_RELOC_ARM_GOTOFF
:
24588 case BFD_RELOC_ARM_GOT_PREL
:
24589 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24590 md_number_to_chars (buf
, value
, 4);
24593 case BFD_RELOC_ARM_TARGET2
:
24594 /* TARGET2 is not partial-inplace, so we need to write the
24595 addend here for REL targets, because it won't be written out
24596 during reloc processing later. */
24597 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24598 md_number_to_chars (buf
, fixP
->fx_offset
, 4);
24601 /* Relocations for FDPIC. */
24602 case BFD_RELOC_ARM_GOTFUNCDESC
:
24603 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
24604 case BFD_RELOC_ARM_FUNCDESC
:
24607 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24608 md_number_to_chars (buf
, 0, 4);
24612 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24613 _("Relocation supported only in FDPIC mode"));
24618 case BFD_RELOC_RVA
:
24620 case BFD_RELOC_ARM_TARGET1
:
24621 case BFD_RELOC_ARM_ROSEGREL32
:
24622 case BFD_RELOC_ARM_SBREL32
:
24623 case BFD_RELOC_32_PCREL
:
24625 case BFD_RELOC_32_SECREL
:
24627 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24629 /* For WinCE we only do this for pcrel fixups. */
24630 if (fixP
->fx_done
|| fixP
->fx_pcrel
)
24632 md_number_to_chars (buf
, value
, 4);
24636 case BFD_RELOC_ARM_PREL31
:
24637 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24639 newval
= md_chars_to_number (buf
, 4) & 0x80000000;
24640 if ((value
^ (value
>> 1)) & 0x40000000)
24642 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24643 _("rel31 relocation overflow"));
24645 newval
|= value
& 0x7fffffff;
24646 md_number_to_chars (buf
, newval
, 4);
24651 case BFD_RELOC_ARM_CP_OFF_IMM
:
24652 case BFD_RELOC_ARM_T32_CP_OFF_IMM
:
24653 case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
:
24654 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
)
24655 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24657 newval
= get_thumb32_insn (buf
);
24658 if ((newval
& 0x0f200f00) == 0x0d000900)
24660 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24661 has permitted values that are multiples of 2, in the range 0
24663 if (value
< -510 || value
> 510 || (value
& 1))
24664 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24665 _("co-processor offset out of range"));
24667 else if ((newval
& 0xfe001f80) == 0xec000f80)
24669 if (value
< -511 || value
> 512 || (value
& 3))
24670 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24671 _("co-processor offset out of range"));
24673 else if (value
< -1023 || value
> 1023 || (value
& 3))
24674 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24675 _("co-processor offset out of range"));
24680 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24681 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24682 newval
= md_chars_to_number (buf
, INSN_SIZE
);
24684 newval
= get_thumb32_insn (buf
);
24687 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
24688 newval
&= 0xffffff80;
24690 newval
&= 0xffffff00;
24694 if (fixP
->fx_r_type
== BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM
)
24695 newval
&= 0xff7fff80;
24697 newval
&= 0xff7fff00;
24698 if ((newval
& 0x0f200f00) == 0x0d000900)
24700 /* This is a fp16 vstr/vldr.
24702 It requires the immediate offset in the instruction is shifted
24703 left by 1 to be a half-word offset.
24705 Here, left shift by 1 first, and later right shift by 2
24706 should get the right offset. */
24709 newval
|= (value
>> 2) | (sign
? INDEX_UP
: 0);
24711 if (fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
24712 || fixP
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
)
24713 md_number_to_chars (buf
, newval
, INSN_SIZE
);
24715 put_thumb32_insn (buf
, newval
);
24718 case BFD_RELOC_ARM_CP_OFF_IMM_S2
:
24719 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
:
24720 if (value
< -255 || value
> 255)
24721 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24722 _("co-processor offset out of range"));
24724 goto cp_off_common
;
24726 case BFD_RELOC_ARM_THUMB_OFFSET
:
24727 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24728 /* Exactly what ranges, and where the offset is inserted depends
24729 on the type of instruction, we can establish this from the
24731 switch (newval
>> 12)
24733 case 4: /* PC load. */
24734 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24735 forced to zero for these loads; md_pcrel_from has already
24736 compensated for this. */
24738 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24739 _("invalid offset, target not word aligned (0x%08lX)"),
24740 (((unsigned long) fixP
->fx_frag
->fr_address
24741 + (unsigned long) fixP
->fx_where
) & ~3)
24742 + (unsigned long) value
);
24744 if (value
& ~0x3fc)
24745 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24746 _("invalid offset, value too big (0x%08lX)"),
24749 newval
|= value
>> 2;
24752 case 9: /* SP load/store. */
24753 if (value
& ~0x3fc)
24754 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24755 _("invalid offset, value too big (0x%08lX)"),
24757 newval
|= value
>> 2;
24760 case 6: /* Word load/store. */
24762 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24763 _("invalid offset, value too big (0x%08lX)"),
24765 newval
|= value
<< 4; /* 6 - 2. */
24768 case 7: /* Byte load/store. */
24770 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24771 _("invalid offset, value too big (0x%08lX)"),
24773 newval
|= value
<< 6;
24776 case 8: /* Halfword load/store. */
24778 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24779 _("invalid offset, value too big (0x%08lX)"),
24781 newval
|= value
<< 5; /* 6 - 1. */
24785 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24786 "Unable to process relocation for thumb opcode: %lx",
24787 (unsigned long) newval
);
24790 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24793 case BFD_RELOC_ARM_THUMB_ADD
:
24794 /* This is a complicated relocation, since we use it for all of
24795 the following immediate relocations:
24799 9bit ADD/SUB SP word-aligned
24800 10bit ADD PC/SP word-aligned
24802 The type of instruction being processed is encoded in the
24809 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24811 int rd
= (newval
>> 4) & 0xf;
24812 int rs
= newval
& 0xf;
24813 int subtract
= !!(newval
& 0x8000);
24815 /* Check for HI regs, only very restricted cases allowed:
24816 Adjusting SP, and using PC or SP to get an address. */
24817 if ((rd
> 7 && (rd
!= REG_SP
|| rs
!= REG_SP
))
24818 || (rs
> 7 && rs
!= REG_SP
&& rs
!= REG_PC
))
24819 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24820 _("invalid Hi register with immediate"));
24822 /* If value is negative, choose the opposite instruction. */
24826 subtract
= !subtract
;
24828 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24829 _("immediate value out of range"));
24834 if (value
& ~0x1fc)
24835 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24836 _("invalid immediate for stack address calculation"));
24837 newval
= subtract
? T_OPCODE_SUB_ST
: T_OPCODE_ADD_ST
;
24838 newval
|= value
>> 2;
24840 else if (rs
== REG_PC
|| rs
== REG_SP
)
24842 /* PR gas/18541. If the addition is for a defined symbol
24843 within range of an ADR instruction then accept it. */
24846 && fixP
->fx_addsy
!= NULL
)
24850 if (! S_IS_DEFINED (fixP
->fx_addsy
)
24851 || S_GET_SEGMENT (fixP
->fx_addsy
) != seg
24852 || S_IS_WEAK (fixP
->fx_addsy
))
24854 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24855 _("address calculation needs a strongly defined nearby symbol"));
24859 offsetT v
= fixP
->fx_where
+ fixP
->fx_frag
->fr_address
;
24861 /* Round up to the next 4-byte boundary. */
24866 v
= S_GET_VALUE (fixP
->fx_addsy
) - v
;
24870 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24871 _("symbol too far away"));
24881 if (subtract
|| value
& ~0x3fc)
24882 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24883 _("invalid immediate for address calculation (value = 0x%08lX)"),
24884 (unsigned long) (subtract
? - value
: value
));
24885 newval
= (rs
== REG_PC
? T_OPCODE_ADD_PC
: T_OPCODE_ADD_SP
);
24887 newval
|= value
>> 2;
24892 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24893 _("immediate value out of range"));
24894 newval
= subtract
? T_OPCODE_SUB_I8
: T_OPCODE_ADD_I8
;
24895 newval
|= (rd
<< 8) | value
;
24900 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24901 _("immediate value out of range"));
24902 newval
= subtract
? T_OPCODE_SUB_I3
: T_OPCODE_ADD_I3
;
24903 newval
|= rd
| (rs
<< 3) | (value
<< 6);
24906 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24909 case BFD_RELOC_ARM_THUMB_IMM
:
24910 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
24911 if (value
< 0 || value
> 255)
24912 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24913 _("invalid immediate: %ld is out of range"),
24916 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24919 case BFD_RELOC_ARM_THUMB_SHIFT
:
24920 /* 5bit shift value (0..32). LSL cannot take 32. */
24921 newval
= md_chars_to_number (buf
, THUMB_SIZE
) & 0xf83f;
24922 temp
= newval
& 0xf800;
24923 if (value
< 0 || value
> 32 || (value
== 32 && temp
== T_OPCODE_LSL_I
))
24924 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24925 _("invalid shift value: %ld"), (long) value
);
24926 /* Shifts of zero must be encoded as LSL. */
24928 newval
= (newval
& 0x003f) | T_OPCODE_LSL_I
;
24929 /* Shifts of 32 are encoded as zero. */
24930 else if (value
== 32)
24932 newval
|= value
<< 6;
24933 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
24936 case BFD_RELOC_VTABLE_INHERIT
:
24937 case BFD_RELOC_VTABLE_ENTRY
:
24941 case BFD_RELOC_ARM_MOVW
:
24942 case BFD_RELOC_ARM_MOVT
:
24943 case BFD_RELOC_ARM_THUMB_MOVW
:
24944 case BFD_RELOC_ARM_THUMB_MOVT
:
24945 if (fixP
->fx_done
|| !seg
->use_rela_p
)
24947 /* REL format relocations are limited to a 16-bit addend. */
24948 if (!fixP
->fx_done
)
24950 if (value
< -0x8000 || value
> 0x7fff)
24951 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24952 _("offset out of range"));
24954 else if (fixP
->fx_r_type
== BFD_RELOC_ARM_MOVT
24955 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24960 if (fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVW
24961 || fixP
->fx_r_type
== BFD_RELOC_ARM_THUMB_MOVT
)
24963 newval
= get_thumb32_insn (buf
);
24964 newval
&= 0xfbf08f00;
24965 newval
|= (value
& 0xf000) << 4;
24966 newval
|= (value
& 0x0800) << 15;
24967 newval
|= (value
& 0x0700) << 4;
24968 newval
|= (value
& 0x00ff);
24969 put_thumb32_insn (buf
, newval
);
24973 newval
= md_chars_to_number (buf
, 4);
24974 newval
&= 0xfff0f000;
24975 newval
|= value
& 0x0fff;
24976 newval
|= (value
& 0xf000) << 4;
24977 md_number_to_chars (buf
, newval
, 4);
24982 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
24983 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
24984 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
24985 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
24986 gas_assert (!fixP
->fx_done
);
24989 bfd_boolean is_mov
;
24990 bfd_vma encoded_addend
= value
;
24992 /* Check that addend can be encoded in instruction. */
24993 if (!seg
->use_rela_p
&& (value
< 0 || value
> 255))
24994 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
24995 _("the offset 0x%08lX is not representable"),
24996 (unsigned long) encoded_addend
);
24998 /* Extract the instruction. */
24999 insn
= md_chars_to_number (buf
, THUMB_SIZE
);
25000 is_mov
= (insn
& 0xf800) == 0x2000;
25005 if (!seg
->use_rela_p
)
25006 insn
|= encoded_addend
;
25012 /* Extract the instruction. */
25013 /* Encoding is the following
25018 /* The following conditions must be true :
25023 rd
= (insn
>> 4) & 0xf;
25025 if ((insn
& 0x8000) || (rd
!= rs
) || rd
> 7)
25026 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25027 _("Unable to process relocation for thumb opcode: %lx"),
25028 (unsigned long) insn
);
25030 /* Encode as ADD immediate8 thumb 1 code. */
25031 insn
= 0x3000 | (rd
<< 8);
25033 /* Place the encoded addend into the first 8 bits of the
25035 if (!seg
->use_rela_p
)
25036 insn
|= encoded_addend
;
25039 /* Update the instruction. */
25040 md_number_to_chars (buf
, insn
, THUMB_SIZE
);
25044 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25045 case BFD_RELOC_ARM_ALU_PC_G0
:
25046 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25047 case BFD_RELOC_ARM_ALU_PC_G1
:
25048 case BFD_RELOC_ARM_ALU_PC_G2
:
25049 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25050 case BFD_RELOC_ARM_ALU_SB_G0
:
25051 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25052 case BFD_RELOC_ARM_ALU_SB_G1
:
25053 case BFD_RELOC_ARM_ALU_SB_G2
:
25054 gas_assert (!fixP
->fx_done
);
25055 if (!seg
->use_rela_p
)
25058 bfd_vma encoded_addend
;
25059 bfd_vma addend_abs
= llabs (value
);
25061 /* Check that the absolute value of the addend can be
25062 expressed as an 8-bit constant plus a rotation. */
25063 encoded_addend
= encode_arm_immediate (addend_abs
);
25064 if (encoded_addend
== (unsigned int) FAIL
)
25065 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25066 _("the offset 0x%08lX is not representable"),
25067 (unsigned long) addend_abs
);
25069 /* Extract the instruction. */
25070 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25072 /* If the addend is positive, use an ADD instruction.
25073 Otherwise use a SUB. Take care not to destroy the S bit. */
25074 insn
&= 0xff1fffff;
25080 /* Place the encoded addend into the first 12 bits of the
25082 insn
&= 0xfffff000;
25083 insn
|= encoded_addend
;
25085 /* Update the instruction. */
25086 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25090 case BFD_RELOC_ARM_LDR_PC_G0
:
25091 case BFD_RELOC_ARM_LDR_PC_G1
:
25092 case BFD_RELOC_ARM_LDR_PC_G2
:
25093 case BFD_RELOC_ARM_LDR_SB_G0
:
25094 case BFD_RELOC_ARM_LDR_SB_G1
:
25095 case BFD_RELOC_ARM_LDR_SB_G2
:
25096 gas_assert (!fixP
->fx_done
);
25097 if (!seg
->use_rela_p
)
25100 bfd_vma addend_abs
= llabs (value
);
25102 /* Check that the absolute value of the addend can be
25103 encoded in 12 bits. */
25104 if (addend_abs
>= 0x1000)
25105 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25106 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
25107 (unsigned long) addend_abs
);
25109 /* Extract the instruction. */
25110 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25112 /* If the addend is negative, clear bit 23 of the instruction.
25113 Otherwise set it. */
25115 insn
&= ~(1 << 23);
25119 /* Place the absolute value of the addend into the first 12 bits
25120 of the instruction. */
25121 insn
&= 0xfffff000;
25122 insn
|= addend_abs
;
25124 /* Update the instruction. */
25125 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25129 case BFD_RELOC_ARM_LDRS_PC_G0
:
25130 case BFD_RELOC_ARM_LDRS_PC_G1
:
25131 case BFD_RELOC_ARM_LDRS_PC_G2
:
25132 case BFD_RELOC_ARM_LDRS_SB_G0
:
25133 case BFD_RELOC_ARM_LDRS_SB_G1
:
25134 case BFD_RELOC_ARM_LDRS_SB_G2
:
25135 gas_assert (!fixP
->fx_done
);
25136 if (!seg
->use_rela_p
)
25139 bfd_vma addend_abs
= llabs (value
);
25141 /* Check that the absolute value of the addend can be
25142 encoded in 8 bits. */
25143 if (addend_abs
>= 0x100)
25144 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25145 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
25146 (unsigned long) addend_abs
);
25148 /* Extract the instruction. */
25149 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25151 /* If the addend is negative, clear bit 23 of the instruction.
25152 Otherwise set it. */
25154 insn
&= ~(1 << 23);
25158 /* Place the first four bits of the absolute value of the addend
25159 into the first 4 bits of the instruction, and the remaining
25160 four into bits 8 .. 11. */
25161 insn
&= 0xfffff0f0;
25162 insn
|= (addend_abs
& 0xf) | ((addend_abs
& 0xf0) << 4);
25164 /* Update the instruction. */
25165 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25169 case BFD_RELOC_ARM_LDC_PC_G0
:
25170 case BFD_RELOC_ARM_LDC_PC_G1
:
25171 case BFD_RELOC_ARM_LDC_PC_G2
:
25172 case BFD_RELOC_ARM_LDC_SB_G0
:
25173 case BFD_RELOC_ARM_LDC_SB_G1
:
25174 case BFD_RELOC_ARM_LDC_SB_G2
:
25175 gas_assert (!fixP
->fx_done
);
25176 if (!seg
->use_rela_p
)
25179 bfd_vma addend_abs
= llabs (value
);
25181 /* Check that the absolute value of the addend is a multiple of
25182 four and, when divided by four, fits in 8 bits. */
25183 if (addend_abs
& 0x3)
25184 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25185 _("bad offset 0x%08lX (must be word-aligned)"),
25186 (unsigned long) addend_abs
);
25188 if ((addend_abs
>> 2) > 0xff)
25189 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25190 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
25191 (unsigned long) addend_abs
);
25193 /* Extract the instruction. */
25194 insn
= md_chars_to_number (buf
, INSN_SIZE
);
25196 /* If the addend is negative, clear bit 23 of the instruction.
25197 Otherwise set it. */
25199 insn
&= ~(1 << 23);
25203 /* Place the addend (divided by four) into the first eight
25204 bits of the instruction. */
25205 insn
&= 0xfffffff0;
25206 insn
|= addend_abs
>> 2;
25208 /* Update the instruction. */
25209 md_number_to_chars (buf
, insn
, INSN_SIZE
);
25213 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25215 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25216 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25217 && ARM_IS_FUNC (fixP
->fx_addsy
)
25218 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25220 /* Force a relocation for a branch 5 bits wide. */
25223 if (v8_1_branch_value_check (value
, 5, FALSE
) == FAIL
)
25224 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25227 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25229 addressT boff
= value
>> 1;
25231 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25232 newval
|= (boff
<< 7);
25233 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25237 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25239 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25240 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25241 && ARM_IS_FUNC (fixP
->fx_addsy
)
25242 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25246 if ((value
& ~0x7f) && ((value
& ~0x3f) != ~0x3f))
25247 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25248 _("branch out of range"));
25250 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25252 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25254 addressT boff
= ((newval
& 0x0780) >> 7) << 1;
25255 addressT diff
= value
- boff
;
25259 newval
|= 1 << 1; /* T bit. */
25261 else if (diff
!= 2)
25263 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25264 _("out of range label-relative fixup value"));
25266 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25270 case BFD_RELOC_ARM_THUMB_BF17
:
25272 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25273 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25274 && ARM_IS_FUNC (fixP
->fx_addsy
)
25275 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25277 /* Force a relocation for a branch 17 bits wide. */
25281 if (v8_1_branch_value_check (value
, 17, TRUE
) == FAIL
)
25282 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25285 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25288 addressT immA
, immB
, immC
;
25290 immA
= (value
& 0x0001f000) >> 12;
25291 immB
= (value
& 0x00000ffc) >> 2;
25292 immC
= (value
& 0x00000002) >> 1;
25294 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25295 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25297 newval2
|= (immC
<< 11) | (immB
<< 1);
25298 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25299 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25303 case BFD_RELOC_ARM_THUMB_BF19
:
25305 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25306 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25307 && ARM_IS_FUNC (fixP
->fx_addsy
)
25308 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25310 /* Force a relocation for a branch 19 bits wide. */
25314 if (v8_1_branch_value_check (value
, 19, TRUE
) == FAIL
)
25315 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25318 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25321 addressT immA
, immB
, immC
;
25323 immA
= (value
& 0x0007f000) >> 12;
25324 immB
= (value
& 0x00000ffc) >> 2;
25325 immC
= (value
& 0x00000002) >> 1;
25327 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25328 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25330 newval2
|= (immC
<< 11) | (immB
<< 1);
25331 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25332 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25336 case BFD_RELOC_ARM_THUMB_BF13
:
25338 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25339 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25340 && ARM_IS_FUNC (fixP
->fx_addsy
)
25341 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25343 /* Force a relocation for a branch 13 bits wide. */
25347 if (v8_1_branch_value_check (value
, 13, TRUE
) == FAIL
)
25348 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25351 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25354 addressT immA
, immB
, immC
;
25356 immA
= (value
& 0x00001000) >> 12;
25357 immB
= (value
& 0x00000ffc) >> 2;
25358 immC
= (value
& 0x00000002) >> 1;
25360 newval
= md_chars_to_number (buf
, THUMB_SIZE
);
25361 newval2
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25363 newval2
|= (immC
<< 11) | (immB
<< 1);
25364 md_number_to_chars (buf
, newval
, THUMB_SIZE
);
25365 md_number_to_chars (buf
+ THUMB_SIZE
, newval2
, THUMB_SIZE
);
25369 case BFD_RELOC_ARM_THUMB_LOOP12
:
25371 && (S_GET_SEGMENT (fixP
->fx_addsy
) == seg
)
25372 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
)
25373 && ARM_IS_FUNC (fixP
->fx_addsy
)
25374 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v8_1m_main
))
25376 /* Force a relocation for a branch 12 bits wide. */
25380 bfd_vma insn
= get_thumb32_insn (buf
);
25381 /* le lr, <label> or le <label> */
25382 if (((insn
& 0xffffffff) == 0xf00fc001)
25383 || ((insn
& 0xffffffff) == 0xf02fc001))
25386 if (v8_1_branch_value_check (value
, 12, FALSE
) == FAIL
)
25387 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25389 if (fixP
->fx_done
|| !seg
->use_rela_p
)
25391 addressT imml
, immh
;
25393 immh
= (value
& 0x00000ffc) >> 2;
25394 imml
= (value
& 0x00000002) >> 1;
25396 newval
= md_chars_to_number (buf
+ THUMB_SIZE
, THUMB_SIZE
);
25397 newval
|= (imml
<< 11) | (immh
<< 1);
25398 md_number_to_chars (buf
+ THUMB_SIZE
, newval
, THUMB_SIZE
);
25402 case BFD_RELOC_ARM_V4BX
:
25403 /* This will need to go in the object file. */
25407 case BFD_RELOC_UNUSED
:
25409 as_bad_where (fixP
->fx_file
, fixP
->fx_line
,
25410 _("bad relocation fixup type (%d)"), fixP
->fx_r_type
);
25414 /* Translate internal representation of relocation info to BFD target
25418 tc_gen_reloc (asection
*section
, fixS
*fixp
)
25421 bfd_reloc_code_real_type code
;
25423 reloc
= XNEW (arelent
);
25425 reloc
->sym_ptr_ptr
= XNEW (asymbol
*);
25426 *reloc
->sym_ptr_ptr
= symbol_get_bfdsym (fixp
->fx_addsy
);
25427 reloc
->address
= fixp
->fx_frag
->fr_address
+ fixp
->fx_where
;
25429 if (fixp
->fx_pcrel
)
25431 if (section
->use_rela_p
)
25432 fixp
->fx_offset
-= md_pcrel_from_section (fixp
, section
);
25434 fixp
->fx_offset
= reloc
->address
;
25436 reloc
->addend
= fixp
->fx_offset
;
25438 switch (fixp
->fx_r_type
)
25441 if (fixp
->fx_pcrel
)
25443 code
= BFD_RELOC_8_PCREL
;
25446 /* Fall through. */
25449 if (fixp
->fx_pcrel
)
25451 code
= BFD_RELOC_16_PCREL
;
25454 /* Fall through. */
25457 if (fixp
->fx_pcrel
)
25459 code
= BFD_RELOC_32_PCREL
;
25462 /* Fall through. */
25464 case BFD_RELOC_ARM_MOVW
:
25465 if (fixp
->fx_pcrel
)
25467 code
= BFD_RELOC_ARM_MOVW_PCREL
;
25470 /* Fall through. */
25472 case BFD_RELOC_ARM_MOVT
:
25473 if (fixp
->fx_pcrel
)
25475 code
= BFD_RELOC_ARM_MOVT_PCREL
;
25478 /* Fall through. */
25480 case BFD_RELOC_ARM_THUMB_MOVW
:
25481 if (fixp
->fx_pcrel
)
25483 code
= BFD_RELOC_ARM_THUMB_MOVW_PCREL
;
25486 /* Fall through. */
25488 case BFD_RELOC_ARM_THUMB_MOVT
:
25489 if (fixp
->fx_pcrel
)
25491 code
= BFD_RELOC_ARM_THUMB_MOVT_PCREL
;
25494 /* Fall through. */
25496 case BFD_RELOC_NONE
:
25497 case BFD_RELOC_ARM_PCREL_BRANCH
:
25498 case BFD_RELOC_ARM_PCREL_BLX
:
25499 case BFD_RELOC_RVA
:
25500 case BFD_RELOC_THUMB_PCREL_BRANCH7
:
25501 case BFD_RELOC_THUMB_PCREL_BRANCH9
:
25502 case BFD_RELOC_THUMB_PCREL_BRANCH12
:
25503 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25504 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25505 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25506 case BFD_RELOC_VTABLE_ENTRY
:
25507 case BFD_RELOC_VTABLE_INHERIT
:
25509 case BFD_RELOC_32_SECREL
:
25511 code
= fixp
->fx_r_type
;
25514 case BFD_RELOC_THUMB_PCREL_BLX
:
25516 if (EF_ARM_EABI_VERSION (meabi_flags
) >= EF_ARM_EABI_VER4
)
25517 code
= BFD_RELOC_THUMB_PCREL_BRANCH23
;
25520 code
= BFD_RELOC_THUMB_PCREL_BLX
;
25523 case BFD_RELOC_ARM_LITERAL
:
25524 case BFD_RELOC_ARM_HWLITERAL
:
25525 /* If this is called then the a literal has
25526 been referenced across a section boundary. */
25527 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25528 _("literal referenced across section boundary"));
25532 case BFD_RELOC_ARM_TLS_CALL
:
25533 case BFD_RELOC_ARM_THM_TLS_CALL
:
25534 case BFD_RELOC_ARM_TLS_DESCSEQ
:
25535 case BFD_RELOC_ARM_THM_TLS_DESCSEQ
:
25536 case BFD_RELOC_ARM_GOT32
:
25537 case BFD_RELOC_ARM_GOTOFF
:
25538 case BFD_RELOC_ARM_GOT_PREL
:
25539 case BFD_RELOC_ARM_PLT32
:
25540 case BFD_RELOC_ARM_TARGET1
:
25541 case BFD_RELOC_ARM_ROSEGREL32
:
25542 case BFD_RELOC_ARM_SBREL32
:
25543 case BFD_RELOC_ARM_PREL31
:
25544 case BFD_RELOC_ARM_TARGET2
:
25545 case BFD_RELOC_ARM_TLS_LDO32
:
25546 case BFD_RELOC_ARM_PCREL_CALL
:
25547 case BFD_RELOC_ARM_PCREL_JUMP
:
25548 case BFD_RELOC_ARM_ALU_PC_G0_NC
:
25549 case BFD_RELOC_ARM_ALU_PC_G0
:
25550 case BFD_RELOC_ARM_ALU_PC_G1_NC
:
25551 case BFD_RELOC_ARM_ALU_PC_G1
:
25552 case BFD_RELOC_ARM_ALU_PC_G2
:
25553 case BFD_RELOC_ARM_LDR_PC_G0
:
25554 case BFD_RELOC_ARM_LDR_PC_G1
:
25555 case BFD_RELOC_ARM_LDR_PC_G2
:
25556 case BFD_RELOC_ARM_LDRS_PC_G0
:
25557 case BFD_RELOC_ARM_LDRS_PC_G1
:
25558 case BFD_RELOC_ARM_LDRS_PC_G2
:
25559 case BFD_RELOC_ARM_LDC_PC_G0
:
25560 case BFD_RELOC_ARM_LDC_PC_G1
:
25561 case BFD_RELOC_ARM_LDC_PC_G2
:
25562 case BFD_RELOC_ARM_ALU_SB_G0_NC
:
25563 case BFD_RELOC_ARM_ALU_SB_G0
:
25564 case BFD_RELOC_ARM_ALU_SB_G1_NC
:
25565 case BFD_RELOC_ARM_ALU_SB_G1
:
25566 case BFD_RELOC_ARM_ALU_SB_G2
:
25567 case BFD_RELOC_ARM_LDR_SB_G0
:
25568 case BFD_RELOC_ARM_LDR_SB_G1
:
25569 case BFD_RELOC_ARM_LDR_SB_G2
:
25570 case BFD_RELOC_ARM_LDRS_SB_G0
:
25571 case BFD_RELOC_ARM_LDRS_SB_G1
:
25572 case BFD_RELOC_ARM_LDRS_SB_G2
:
25573 case BFD_RELOC_ARM_LDC_SB_G0
:
25574 case BFD_RELOC_ARM_LDC_SB_G1
:
25575 case BFD_RELOC_ARM_LDC_SB_G2
:
25576 case BFD_RELOC_ARM_V4BX
:
25577 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
:
25578 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC
:
25579 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC
:
25580 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC
:
25581 case BFD_RELOC_ARM_GOTFUNCDESC
:
25582 case BFD_RELOC_ARM_GOTOFFFUNCDESC
:
25583 case BFD_RELOC_ARM_FUNCDESC
:
25584 case BFD_RELOC_ARM_THUMB_BF17
:
25585 case BFD_RELOC_ARM_THUMB_BF19
:
25586 case BFD_RELOC_ARM_THUMB_BF13
:
25587 code
= fixp
->fx_r_type
;
25590 case BFD_RELOC_ARM_TLS_GOTDESC
:
25591 case BFD_RELOC_ARM_TLS_GD32
:
25592 case BFD_RELOC_ARM_TLS_GD32_FDPIC
:
25593 case BFD_RELOC_ARM_TLS_LE32
:
25594 case BFD_RELOC_ARM_TLS_IE32
:
25595 case BFD_RELOC_ARM_TLS_IE32_FDPIC
:
25596 case BFD_RELOC_ARM_TLS_LDM32
:
25597 case BFD_RELOC_ARM_TLS_LDM32_FDPIC
:
25598 /* BFD will include the symbol's address in the addend.
25599 But we don't want that, so subtract it out again here. */
25600 if (!S_IS_COMMON (fixp
->fx_addsy
))
25601 reloc
->addend
-= (*reloc
->sym_ptr_ptr
)->value
;
25602 code
= fixp
->fx_r_type
;
25606 case BFD_RELOC_ARM_IMMEDIATE
:
25607 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25608 _("internal relocation (type: IMMEDIATE) not fixed up"));
25611 case BFD_RELOC_ARM_ADRL_IMMEDIATE
:
25612 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25613 _("ADRL used for a symbol not defined in the same file"));
25616 case BFD_RELOC_THUMB_PCREL_BRANCH5
:
25617 case BFD_RELOC_THUMB_PCREL_BFCSEL
:
25618 case BFD_RELOC_ARM_THUMB_LOOP12
:
25619 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25620 _("%s used for a symbol not defined in the same file"),
25621 bfd_get_reloc_code_name (fixp
->fx_r_type
));
25624 case BFD_RELOC_ARM_OFFSET_IMM
:
25625 if (section
->use_rela_p
)
25627 code
= fixp
->fx_r_type
;
25631 if (fixp
->fx_addsy
!= NULL
25632 && !S_IS_DEFINED (fixp
->fx_addsy
)
25633 && S_IS_LOCAL (fixp
->fx_addsy
))
25635 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25636 _("undefined local label `%s'"),
25637 S_GET_NAME (fixp
->fx_addsy
));
25641 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25642 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
25649 switch (fixp
->fx_r_type
)
25651 case BFD_RELOC_NONE
: type
= "NONE"; break;
25652 case BFD_RELOC_ARM_OFFSET_IMM8
: type
= "OFFSET_IMM8"; break;
25653 case BFD_RELOC_ARM_SHIFT_IMM
: type
= "SHIFT_IMM"; break;
25654 case BFD_RELOC_ARM_SMC
: type
= "SMC"; break;
25655 case BFD_RELOC_ARM_SWI
: type
= "SWI"; break;
25656 case BFD_RELOC_ARM_MULTI
: type
= "MULTI"; break;
25657 case BFD_RELOC_ARM_CP_OFF_IMM
: type
= "CP_OFF_IMM"; break;
25658 case BFD_RELOC_ARM_T32_OFFSET_IMM
: type
= "T32_OFFSET_IMM"; break;
25659 case BFD_RELOC_ARM_T32_CP_OFF_IMM
: type
= "T32_CP_OFF_IMM"; break;
25660 case BFD_RELOC_ARM_THUMB_ADD
: type
= "THUMB_ADD"; break;
25661 case BFD_RELOC_ARM_THUMB_SHIFT
: type
= "THUMB_SHIFT"; break;
25662 case BFD_RELOC_ARM_THUMB_IMM
: type
= "THUMB_IMM"; break;
25663 case BFD_RELOC_ARM_THUMB_OFFSET
: type
= "THUMB_OFFSET"; break;
25664 default: type
= _("<unknown>"); break;
25666 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25667 _("cannot represent %s relocation in this object file format"),
25674 if ((code
== BFD_RELOC_32_PCREL
|| code
== BFD_RELOC_32
)
25676 && fixp
->fx_addsy
== GOT_symbol
)
25678 code
= BFD_RELOC_ARM_GOTPC
;
25679 reloc
->addend
= fixp
->fx_offset
= reloc
->address
;
25683 reloc
->howto
= bfd_reloc_type_lookup (stdoutput
, code
);
25685 if (reloc
->howto
== NULL
)
25687 as_bad_where (fixp
->fx_file
, fixp
->fx_line
,
25688 _("cannot represent %s relocation in this object file format"),
25689 bfd_get_reloc_code_name (code
));
25693 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
25694 vtable entry to be used in the relocation's section offset. */
25695 if (fixp
->fx_r_type
== BFD_RELOC_VTABLE_ENTRY
)
25696 reloc
->address
= fixp
->fx_offset
;
25701 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
25704 cons_fix_new_arm (fragS
* frag
,
25708 bfd_reloc_code_real_type reloc
)
25713 FIXME: @@ Should look at CPU word size. */
25717 reloc
= BFD_RELOC_8
;
25720 reloc
= BFD_RELOC_16
;
25724 reloc
= BFD_RELOC_32
;
25727 reloc
= BFD_RELOC_64
;
25732 if (exp
->X_op
== O_secrel
)
25734 exp
->X_op
= O_symbol
;
25735 reloc
= BFD_RELOC_32_SECREL
;
25739 fix_new_exp (frag
, where
, size
, exp
, pcrel
, reloc
);
#if defined (OBJ_COFF)
/* If the destination of a Thumb BL is a defined non-Thumb function,
   redirect the branch to that function's Thumb entry point
   (the (interfacearm) attribute case).  */
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.  */
  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
      && fixP->fx_addsy != NULL
      && S_IS_DEFINED (fixP->fx_addsy)
      && ! THUMB_IS_FUNC (fixP->fx_addsy))
    fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
25762 arm_force_relocation (struct fix
* fixp
)
25764 #if defined (OBJ_COFF) && defined (TE_PE)
25765 if (fixp
->fx_r_type
== BFD_RELOC_RVA
)
25769 /* In case we have a call or a branch to a function in ARM ISA mode from
25770 a thumb function or vice-versa force the relocation. These relocations
25771 are cleared off for some cores that might have blx and simple transformations
25775 switch (fixp
->fx_r_type
)
25777 case BFD_RELOC_ARM_PCREL_JUMP
:
25778 case BFD_RELOC_ARM_PCREL_CALL
:
25779 case BFD_RELOC_THUMB_PCREL_BLX
:
25780 if (THUMB_IS_FUNC (fixp
->fx_addsy
))
25784 case BFD_RELOC_ARM_PCREL_BLX
:
25785 case BFD_RELOC_THUMB_PCREL_BRANCH25
:
25786 case BFD_RELOC_THUMB_PCREL_BRANCH20
:
25787 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
25788 if (ARM_IS_FUNC (fixp
->fx_addsy
))
25797 /* Resolve these relocations even if the symbol is extern or weak.
25798 Technically this is probably wrong due to symbol preemption.
25799 In practice these relocations do not have enough range to be useful
25800 at dynamic link time, and some code (e.g. in the Linux kernel)
25801 expects these references to be resolved. */
25802 if (fixp
->fx_r_type
== BFD_RELOC_ARM_IMMEDIATE
25803 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM
25804 || fixp
->fx_r_type
== BFD_RELOC_ARM_OFFSET_IMM8
25805 || fixp
->fx_r_type
== BFD_RELOC_ARM_ADRL_IMMEDIATE
25806 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM
25807 || fixp
->fx_r_type
== BFD_RELOC_ARM_CP_OFF_IMM_S2
25808 || fixp
->fx_r_type
== BFD_RELOC_ARM_THUMB_OFFSET
25809 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_IMM
25810 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMMEDIATE
25811 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_IMM12
25812 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_OFFSET_IMM
25813 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_ADD_PC12
25814 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM
25815 || fixp
->fx_r_type
== BFD_RELOC_ARM_T32_CP_OFF_IMM_S2
)
25818 /* Always leave these relocations for the linker. */
25819 if ((fixp
->fx_r_type
>= BFD_RELOC_ARM_ALU_PC_G0_NC
25820 && fixp
->fx_r_type
<= BFD_RELOC_ARM_LDC_SB_G2
)
25821 || fixp
->fx_r_type
== BFD_RELOC_ARM_LDR_PC_G0
)
25824 /* Always generate relocations against function symbols. */
25825 if (fixp
->fx_r_type
== BFD_RELOC_32
25827 && (symbol_get_bfdsym (fixp
->fx_addsy
)->flags
& BSF_FUNCTION
))
25830 return generic_force_reloc (fixp
);
#if defined (OBJ_ELF) || defined (OBJ_COFF)
/* Relocations against function names must be left unadjusted,
   so that the linker can use this information to generate interworking
   stubs.  The MIPS version of this function
   also prevents relocations that are mips-16 specific, but I do not
   know why it does this.

   FIXME:
   There is one other problem that ought to be addressed here, but
   which currently is not:  Taking the address of a label (rather
   than a function) and then later jumping to that address.  Such
   addresses also ought to have their bottom bit set (assuming that
   they reside in Thumb code), but at the moment they will not.  */

/* Return 1 if the fixup's symbol may be replaced by section+offset,
   0 if the symbol must be preserved in the relocation.
   NOTE(review): return statements reconstructed from a mangled
   extraction -- verify against upstream binutils.  */
bfd_boolean
arm_fix_adjustable (fixS * fixP)
{
  if (fixP->fx_addsy == NULL)
    return 1;

  /* Preserve relocations against symbols with function type.  */
  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
    return 0;

  if (THUMB_IS_FUNC (fixP->fx_addsy)
      && fixP->fx_subsy == NULL)
    return 0;

  /* We need the symbol name for the VTABLE entries.  */
  if (	fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    return 0;

  /* Don't allow symbols to be discarded on GOT related relocs.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
    return 0;

  /* Similarly for group relocations.  */
  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 0;

  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
    return 0;

  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
     offsets, so keep these symbols.  */
  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
    return 0;

  return 1;
}
#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25915 elf32_arm_target_format (void)
25918 return (target_big_endian
25919 ? "elf32-bigarm-symbian"
25920 : "elf32-littlearm-symbian");
25921 #elif defined (TE_VXWORKS)
25922 return (target_big_endian
25923 ? "elf32-bigarm-vxworks"
25924 : "elf32-littlearm-vxworks");
25925 #elif defined (TE_NACL)
25926 return (target_big_endian
25927 ? "elf32-bigarm-nacl"
25928 : "elf32-littlearm-nacl");
25932 if (target_big_endian
)
25933 return "elf32-bigarm-fdpic";
25935 return "elf32-littlearm-fdpic";
25939 if (target_big_endian
)
25940 return "elf32-bigarm";
25942 return "elf32-littlearm";
25948 armelf_frob_symbol (symbolS
* symp
,
25951 elf_frob_symbol (symp
, puntp
);
25955 /* MD interface: Finalization. */
25960 literal_pool
* pool
;
25962 /* Ensure that all the IT blocks are properly closed. */
25963 check_it_blocks_finished ();
25965 for (pool
= list_of_pools
; pool
; pool
= pool
->next
)
25967 /* Put it at the end of the relevant section. */
25968 subseg_set (pool
->section
, pool
->sub_section
);
25970 arm_elf_change_section ();
25977 /* Remove any excess mapping symbols generated for alignment frags in
25978 SEC. We may have created a mapping symbol before a zero byte
25979 alignment; remove it if there's a mapping symbol after the
25982 check_mapping_symbols (bfd
*abfd ATTRIBUTE_UNUSED
, asection
*sec
,
25983 void *dummy ATTRIBUTE_UNUSED
)
25985 segment_info_type
*seginfo
= seg_info (sec
);
25988 if (seginfo
== NULL
|| seginfo
->frchainP
== NULL
)
25991 for (fragp
= seginfo
->frchainP
->frch_root
;
25993 fragp
= fragp
->fr_next
)
25995 symbolS
*sym
= fragp
->tc_frag_data
.last_map
;
25996 fragS
*next
= fragp
->fr_next
;
25998 /* Variable-sized frags have been converted to fixed size by
25999 this point. But if this was variable-sized to start with,
26000 there will be a fixed-size frag after it. So don't handle
26002 if (sym
== NULL
|| next
== NULL
)
26005 if (S_GET_VALUE (sym
) < next
->fr_address
)
26006 /* Not at the end of this frag. */
26008 know (S_GET_VALUE (sym
) == next
->fr_address
);
26012 if (next
->tc_frag_data
.first_map
!= NULL
)
26014 /* Next frag starts with a mapping symbol. Discard this
26016 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26020 if (next
->fr_next
== NULL
)
26022 /* This mapping symbol is at the end of the section. Discard
26024 know (next
->fr_fix
== 0 && next
->fr_var
== 0);
26025 symbol_remove (sym
, &symbol_rootP
, &symbol_lastP
);
26029 /* As long as we have empty frags without any mapping symbols,
26031 /* If the next frag is non-empty and does not start with a
26032 mapping symbol, then this mapping symbol is required. */
26033 if (next
->fr_address
!= next
->fr_next
->fr_address
)
26036 next
= next
->fr_next
;
26038 while (next
!= NULL
);
/* Adjust the symbol table.  This marks Thumb symbols as distinct from
   ARM ones.  */

void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
26125 /* MD interface: Initialization. */
26128 set_constant_flonums (void)
26132 for (i
= 0; i
< NUM_FLOAT_VALS
; i
++)
26133 if (atof_ieee ((char *) fp_const
[i
], 'x', fp_values
[i
]) == NULL
)
26137 /* Auto-select Thumb mode if it's the only available instruction set for the
26138 given architecture. */
26141 autoselect_thumb_from_cpu_variant (void)
26143 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v1
))
26144 opcode_select (16);
26153 if ( (arm_ops_hsh
= hash_new ()) == NULL
26154 || (arm_cond_hsh
= hash_new ()) == NULL
26155 || (arm_shift_hsh
= hash_new ()) == NULL
26156 || (arm_psr_hsh
= hash_new ()) == NULL
26157 || (arm_v7m_psr_hsh
= hash_new ()) == NULL
26158 || (arm_reg_hsh
= hash_new ()) == NULL
26159 || (arm_reloc_hsh
= hash_new ()) == NULL
26160 || (arm_barrier_opt_hsh
= hash_new ()) == NULL
)
26161 as_fatal (_("virtual memory exhausted"));
26163 for (i
= 0; i
< sizeof (insns
) / sizeof (struct asm_opcode
); i
++)
26164 hash_insert (arm_ops_hsh
, insns
[i
].template_name
, (void *) (insns
+ i
));
26165 for (i
= 0; i
< sizeof (conds
) / sizeof (struct asm_cond
); i
++)
26166 hash_insert (arm_cond_hsh
, conds
[i
].template_name
, (void *) (conds
+ i
));
26167 for (i
= 0; i
< sizeof (shift_names
) / sizeof (struct asm_shift_name
); i
++)
26168 hash_insert (arm_shift_hsh
, shift_names
[i
].name
, (void *) (shift_names
+ i
));
26169 for (i
= 0; i
< sizeof (psrs
) / sizeof (struct asm_psr
); i
++)
26170 hash_insert (arm_psr_hsh
, psrs
[i
].template_name
, (void *) (psrs
+ i
));
26171 for (i
= 0; i
< sizeof (v7m_psrs
) / sizeof (struct asm_psr
); i
++)
26172 hash_insert (arm_v7m_psr_hsh
, v7m_psrs
[i
].template_name
,
26173 (void *) (v7m_psrs
+ i
));
26174 for (i
= 0; i
< sizeof (reg_names
) / sizeof (struct reg_entry
); i
++)
26175 hash_insert (arm_reg_hsh
, reg_names
[i
].name
, (void *) (reg_names
+ i
));
26177 i
< sizeof (barrier_opt_names
) / sizeof (struct asm_barrier_opt
);
26179 hash_insert (arm_barrier_opt_hsh
, barrier_opt_names
[i
].template_name
,
26180 (void *) (barrier_opt_names
+ i
));
26182 for (i
= 0; i
< ARRAY_SIZE (reloc_names
); i
++)
26184 struct reloc_entry
* entry
= reloc_names
+ i
;
26186 if (arm_is_eabi() && entry
->reloc
== BFD_RELOC_ARM_PLT32
)
26187 /* This makes encode_branch() use the EABI versions of this relocation. */
26188 entry
->reloc
= BFD_RELOC_UNUSED
;
26190 hash_insert (arm_reloc_hsh
, entry
->name
, (void *) entry
);
26194 set_constant_flonums ();
26196 /* Set the cpu variant based on the command-line options. We prefer
26197 -mcpu= over -march= if both are set (as for GCC); and we prefer
26198 -mfpu= over any other way of setting the floating point unit.
26199 Use of legacy options with new options are faulted. */
26202 if (mcpu_cpu_opt
|| march_cpu_opt
)
26203 as_bad (_("use of old and new-style options to set CPU type"));
26205 selected_arch
= *legacy_cpu
;
26207 else if (mcpu_cpu_opt
)
26209 selected_arch
= *mcpu_cpu_opt
;
26210 selected_ext
= *mcpu_ext_opt
;
26212 else if (march_cpu_opt
)
26214 selected_arch
= *march_cpu_opt
;
26215 selected_ext
= *march_ext_opt
;
26217 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
26222 as_bad (_("use of old and new-style options to set FPU type"));
26224 selected_fpu
= *legacy_fpu
;
26227 selected_fpu
= *mfpu_opt
;
26230 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
26231 || defined (TE_NetBSD) || defined (TE_VXWORKS))
26232 /* Some environments specify a default FPU. If they don't, infer it
26233 from the processor. */
26235 selected_fpu
= *mcpu_fpu_opt
;
26236 else if (march_fpu_opt
)
26237 selected_fpu
= *march_fpu_opt
;
26239 selected_fpu
= fpu_default
;
26243 if (ARM_FEATURE_ZERO (selected_fpu
))
26245 if (!no_cpu_selected ())
26246 selected_fpu
= fpu_default
;
26248 selected_fpu
= fpu_arch_fpa
;
26252 if (ARM_FEATURE_ZERO (selected_arch
))
26254 selected_arch
= cpu_default
;
26255 selected_cpu
= selected_arch
;
26257 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26259 /* Autodection of feature mode: allow all features in cpu_variant but leave
26260 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
26261 after all instruction have been processed and we can decide what CPU
26262 should be selected. */
26263 if (ARM_FEATURE_ZERO (selected_arch
))
26264 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
26266 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
26269 autoselect_thumb_from_cpu_variant ();
26271 arm_arch_used
= thumb_arch_used
= arm_arch_none
;
26273 #if defined OBJ_COFF || defined OBJ_ELF
26275 unsigned int flags
= 0;
26277 #if defined OBJ_ELF
26278 flags
= meabi_flags
;
26280 switch (meabi_flags
)
26282 case EF_ARM_EABI_UNKNOWN
:
26284 /* Set the flags in the private structure. */
26285 if (uses_apcs_26
) flags
|= F_APCS26
;
26286 if (support_interwork
) flags
|= F_INTERWORK
;
26287 if (uses_apcs_float
) flags
|= F_APCS_FLOAT
;
26288 if (pic_code
) flags
|= F_PIC
;
26289 if (!ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_any_hard
))
26290 flags
|= F_SOFT_FLOAT
;
26292 switch (mfloat_abi_opt
)
26294 case ARM_FLOAT_ABI_SOFT
:
26295 case ARM_FLOAT_ABI_SOFTFP
:
26296 flags
|= F_SOFT_FLOAT
;
26299 case ARM_FLOAT_ABI_HARD
:
26300 if (flags
& F_SOFT_FLOAT
)
26301 as_bad (_("hard-float conflicts with specified fpu"));
26305 /* Using pure-endian doubles (even if soft-float). */
26306 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_endian_pure
))
26307 flags
|= F_VFP_FLOAT
;
26309 #if defined OBJ_ELF
26310 if (ARM_CPU_HAS_FEATURE (cpu_variant
, fpu_arch_maverick
))
26311 flags
|= EF_ARM_MAVERICK_FLOAT
;
26314 case EF_ARM_EABI_VER4
:
26315 case EF_ARM_EABI_VER5
:
26316 /* No additional flags to set. */
26323 bfd_set_private_flags (stdoutput
, flags
);
26325 /* We have run out flags in the COFF header to encode the
26326 status of ATPCS support, so instead we create a dummy,
26327 empty, debug section called .arm.atpcs. */
26332 sec
= bfd_make_section (stdoutput
, ".arm.atpcs");
26336 bfd_set_section_flags
26337 (stdoutput
, sec
, SEC_READONLY
| SEC_DEBUGGING
/* | SEC_HAS_CONTENTS */);
26338 bfd_set_section_size (stdoutput
, sec
, 0);
26339 bfd_set_section_contents (stdoutput
, sec
, NULL
, 0, 0);
26345 /* Record the CPU type as well. */
26346 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt2
))
26347 mach
= bfd_mach_arm_iWMMXt2
;
26348 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_iwmmxt
))
26349 mach
= bfd_mach_arm_iWMMXt
;
26350 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_xscale
))
26351 mach
= bfd_mach_arm_XScale
;
26352 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_cext_maverick
))
26353 mach
= bfd_mach_arm_ep9312
;
26354 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5e
))
26355 mach
= bfd_mach_arm_5TE
;
26356 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v5
))
26358 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
26359 mach
= bfd_mach_arm_5T
;
26361 mach
= bfd_mach_arm_5
;
26363 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4
))
26365 if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v4t
))
26366 mach
= bfd_mach_arm_4T
;
26368 mach
= bfd_mach_arm_4
;
26370 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3m
))
26371 mach
= bfd_mach_arm_3M
;
26372 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v3
))
26373 mach
= bfd_mach_arm_3
;
26374 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2s
))
26375 mach
= bfd_mach_arm_2a
;
26376 else if (ARM_CPU_HAS_FEATURE (cpu_variant
, arm_ext_v2
))
26377 mach
= bfd_mach_arm_2
;
26379 mach
= bfd_mach_arm_unknown
;
26381 bfd_set_arch_mach (stdoutput
, TARGET_ARCH
, mach
);
/* Command line processing.  */

/* md_parse_option
      Invocation line includes a switch not recognized by the base assembler.
      See if it's a processor-specific option.

      This routine is somewhat complicated by the need for backwards
      compatibility (since older releases of gcc can't be changed).
      The new options try to make the interface as compatible as
      possible with GCC.

      New options (supported) are:

	      -mcpu=<cpu name>		 Assemble for selected processor
	      -march=<architecture name> Assemble for selected architecture
	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
	      -EB/-mbig-endian		 Big-endian
	      -EL/-mlittle-endian	 Little-endian
	      -k			 Generate PIC code
	      -mthumb			 Start in Thumb mode
	      -mthumb-interwork		 Code supports ARM/Thumb interworking

	      -m[no-]warn-deprecated	 Warn about deprecated features
	      -m[no-]warn-syms		 Warn when symbols match instructions

      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -macps-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code

      (sometime these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for back-wards compatibility.
      Cpu variants, the arm part is optional:
	      -m[arm]1		      Currently not supported.
	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
	      -m[arm]3		      Arm 3 processor
	      -m[arm]6[xx],	      Arm 6 processors
	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
	      -m[arm]8[10]	      Arm 8 processors
	      -m[arm]9[20][tdmi]      Arm 9 processors
	      -mstrongarm[110[0]]     StrongARM processors
	      -mxscale		      XScale processors
	      -m[arm]v[2345[t[e]]]    Arm architectures
	      -mall		      All (except the ARM1)
      FP variants:
	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
	      -mfpe-old		      (No float load/store multiples)
	      -mvfpxd		      VFP Single precision
	      -mno-fpu		      Disable all floating point instructions

      The following CPU names are recognized:
	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.  */
26450 const char * md_shortopts
= "m:k";
26452 #ifdef ARM_BI_ENDIAN
26453 #define OPTION_EB (OPTION_MD_BASE + 0)
26454 #define OPTION_EL (OPTION_MD_BASE + 1)
26456 #if TARGET_BYTES_BIG_ENDIAN
26457 #define OPTION_EB (OPTION_MD_BASE + 0)
26459 #define OPTION_EL (OPTION_MD_BASE + 1)
26462 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
26463 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
26465 struct option md_longopts
[] =
26468 {"EB", no_argument
, NULL
, OPTION_EB
},
26471 {"EL", no_argument
, NULL
, OPTION_EL
},
26473 {"fix-v4bx", no_argument
, NULL
, OPTION_FIX_V4BX
},
26475 {"fdpic", no_argument
, NULL
, OPTION_FDPIC
},
26477 {NULL
, no_argument
, NULL
, 0}
26480 size_t md_longopts_size
= sizeof (md_longopts
);
/* A simple on/off command-line option: NAME flips *VAR to VALUE.  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.	*/
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
26491 struct arm_option_table arm_opts
[] =
26493 {"k", N_("generate PIC code"), &pic_code
, 1, NULL
},
26494 {"mthumb", N_("assemble Thumb code"), &thumb_mode
, 1, NULL
},
26495 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
26496 &support_interwork
, 1, NULL
},
26497 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26
, 0, NULL
},
26498 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26
, 1, NULL
},
26499 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float
,
26501 {"mapcs-reentrant", N_("re-entrant code"), &pic_code
, 1, NULL
},
26502 {"matpcs", N_("code is ATPCS conformant"), &atpcs
, 1, NULL
},
26503 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian
, 1, NULL
},
26504 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian
, 0,
26507 /* These are recognized by the assembler, but have no affect on code. */
26508 {"mapcs-frame", N_("use frame pointer"), NULL
, 0, NULL
},
26509 {"mapcs-stack-check", N_("use stack size checking"), NULL
, 0, NULL
},
26511 {"mwarn-deprecated", NULL
, &warn_on_deprecated
, 1, NULL
},
26512 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
26513 &warn_on_deprecated
, 0, NULL
},
26514 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms
), TRUE
, NULL
},
26515 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms
), FALSE
, NULL
},
26516 {NULL
, NULL
, NULL
, 0, NULL
}
26519 struct arm_legacy_option_table
26521 const char * option
; /* Option name to match. */
26522 const arm_feature_set
** var
; /* Variable to change. */
26523 const arm_feature_set value
; /* What to change it to. */
26524 const char * deprecated
; /* If non-null, print this message. */
26527 const struct arm_legacy_option_table arm_legacy_opts
[] =
26529 /* DON'T add any new processors to this list -- we want the whole list
26530 to go away... Add them to the processors table instead. */
26531 {"marm1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
26532 {"m1", &legacy_cpu
, ARM_ARCH_V1
, N_("use -mcpu=arm1")},
26533 {"marm2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
26534 {"m2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -mcpu=arm2")},
26535 {"marm250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
26536 {"m250", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm250")},
26537 {"marm3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
26538 {"m3", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -mcpu=arm3")},
26539 {"marm6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
26540 {"m6", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm6")},
26541 {"marm600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
26542 {"m600", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm600")},
26543 {"marm610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
26544 {"m610", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm610")},
26545 {"marm620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
26546 {"m620", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm620")},
26547 {"marm7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
26548 {"m7", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7")},
26549 {"marm70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
26550 {"m70", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm70")},
26551 {"marm700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
26552 {"m700", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700")},
26553 {"marm700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
26554 {"m700i", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm700i")},
26555 {"marm710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
26556 {"m710", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710")},
26557 {"marm710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
26558 {"m710c", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm710c")},
26559 {"marm720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
26560 {"m720", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm720")},
26561 {"marm7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
26562 {"m7d", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7d")},
26563 {"marm7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
26564 {"m7di", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7di")},
26565 {"marm7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
26566 {"m7m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7m")},
26567 {"marm7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
26568 {"m7dm", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dm")},
26569 {"marm7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
26570 {"m7dmi", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -mcpu=arm7dmi")},
26571 {"marm7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
26572 {"m7100", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7100")},
26573 {"marm7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
26574 {"m7500", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500")},
26575 {"marm7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
26576 {"m7500fe", &legacy_cpu
, ARM_ARCH_V3
, N_("use -mcpu=arm7500fe")},
26577 {"marm7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26578 {"m7t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26579 {"marm7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26580 {"m7tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm7tdmi")},
26581 {"marm710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
26582 {"m710t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm710t")},
26583 {"marm720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
26584 {"m720t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm720t")},
26585 {"marm740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
26586 {"m740t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm740t")},
26587 {"marm8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
26588 {"m8", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm8")},
26589 {"marm810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
26590 {"m810", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=arm810")},
26591 {"marm9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
26592 {"m9", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9")},
26593 {"marm9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
26594 {"m9tdmi", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm9tdmi")},
26595 {"marm920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
26596 {"m920", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm920")},
26597 {"marm940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
26598 {"m940", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -mcpu=arm940")},
26599 {"mstrongarm", &legacy_cpu
, ARM_ARCH_V4
, N_("use -mcpu=strongarm")},
26600 {"mstrongarm110", &legacy_cpu
, ARM_ARCH_V4
,
26601 N_("use -mcpu=strongarm110")},
26602 {"mstrongarm1100", &legacy_cpu
, ARM_ARCH_V4
,
26603 N_("use -mcpu=strongarm1100")},
26604 {"mstrongarm1110", &legacy_cpu
, ARM_ARCH_V4
,
26605 N_("use -mcpu=strongarm1110")},
26606 {"mxscale", &legacy_cpu
, ARM_ARCH_XSCALE
, N_("use -mcpu=xscale")},
26607 {"miwmmxt", &legacy_cpu
, ARM_ARCH_IWMMXT
, N_("use -mcpu=iwmmxt")},
26608 {"mall", &legacy_cpu
, ARM_ANY
, N_("use -mcpu=all")},
26610 /* Architecture variants -- don't add any more to this list either. */
26611 {"mv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
26612 {"marmv2", &legacy_cpu
, ARM_ARCH_V2
, N_("use -march=armv2")},
26613 {"mv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
26614 {"marmv2a", &legacy_cpu
, ARM_ARCH_V2S
, N_("use -march=armv2a")},
26615 {"mv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
26616 {"marmv3", &legacy_cpu
, ARM_ARCH_V3
, N_("use -march=armv3")},
26617 {"mv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
26618 {"marmv3m", &legacy_cpu
, ARM_ARCH_V3M
, N_("use -march=armv3m")},
26619 {"mv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
26620 {"marmv4", &legacy_cpu
, ARM_ARCH_V4
, N_("use -march=armv4")},
26621 {"mv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
26622 {"marmv4t", &legacy_cpu
, ARM_ARCH_V4T
, N_("use -march=armv4t")},
26623 {"mv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
26624 {"marmv5", &legacy_cpu
, ARM_ARCH_V5
, N_("use -march=armv5")},
26625 {"mv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
26626 {"marmv5t", &legacy_cpu
, ARM_ARCH_V5T
, N_("use -march=armv5t")},
26627 {"mv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
26628 {"marmv5e", &legacy_cpu
, ARM_ARCH_V5TE
, N_("use -march=armv5te")},
26630 /* Floating point variants -- don't add any more to this list either. */
26631 {"mfpe-old", &legacy_fpu
, FPU_ARCH_FPE
, N_("use -mfpu=fpe")},
26632 {"mfpa10", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa10")},
26633 {"mfpa11", &legacy_fpu
, FPU_ARCH_FPA
, N_("use -mfpu=fpa11")},
26634 {"mno-fpu", &legacy_fpu
, ARM_ARCH_NONE
,
26635 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
26637 {NULL
, NULL
, ARM_ARCH_NONE
, NULL
}
26640 struct arm_cpu_option_table
26644 const arm_feature_set value
;
26645 const arm_feature_set ext
;
26646 /* For some CPUs we assume an FPU unless the user explicitly sets
26648 const arm_feature_set default_fpu
;
26649 /* The canonical name of the CPU, or NULL to use NAME converted to upper
26651 const char * canonical_name
;
26654 /* This list should, at a minimum, contain all the cpu names
26655 recognized by GCC. */
26656 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
26658 static const struct arm_cpu_option_table arm_cpus
[] =
26660 ARM_CPU_OPT ("all", NULL
, ARM_ANY
,
26663 ARM_CPU_OPT ("arm1", NULL
, ARM_ARCH_V1
,
26666 ARM_CPU_OPT ("arm2", NULL
, ARM_ARCH_V2
,
26669 ARM_CPU_OPT ("arm250", NULL
, ARM_ARCH_V2S
,
26672 ARM_CPU_OPT ("arm3", NULL
, ARM_ARCH_V2S
,
26675 ARM_CPU_OPT ("arm6", NULL
, ARM_ARCH_V3
,
26678 ARM_CPU_OPT ("arm60", NULL
, ARM_ARCH_V3
,
26681 ARM_CPU_OPT ("arm600", NULL
, ARM_ARCH_V3
,
26684 ARM_CPU_OPT ("arm610", NULL
, ARM_ARCH_V3
,
26687 ARM_CPU_OPT ("arm620", NULL
, ARM_ARCH_V3
,
26690 ARM_CPU_OPT ("arm7", NULL
, ARM_ARCH_V3
,
26693 ARM_CPU_OPT ("arm7m", NULL
, ARM_ARCH_V3M
,
26696 ARM_CPU_OPT ("arm7d", NULL
, ARM_ARCH_V3
,
26699 ARM_CPU_OPT ("arm7dm", NULL
, ARM_ARCH_V3M
,
26702 ARM_CPU_OPT ("arm7di", NULL
, ARM_ARCH_V3
,
26705 ARM_CPU_OPT ("arm7dmi", NULL
, ARM_ARCH_V3M
,
26708 ARM_CPU_OPT ("arm70", NULL
, ARM_ARCH_V3
,
26711 ARM_CPU_OPT ("arm700", NULL
, ARM_ARCH_V3
,
26714 ARM_CPU_OPT ("arm700i", NULL
, ARM_ARCH_V3
,
26717 ARM_CPU_OPT ("arm710", NULL
, ARM_ARCH_V3
,
26720 ARM_CPU_OPT ("arm710t", NULL
, ARM_ARCH_V4T
,
26723 ARM_CPU_OPT ("arm720", NULL
, ARM_ARCH_V3
,
26726 ARM_CPU_OPT ("arm720t", NULL
, ARM_ARCH_V4T
,
26729 ARM_CPU_OPT ("arm740t", NULL
, ARM_ARCH_V4T
,
26732 ARM_CPU_OPT ("arm710c", NULL
, ARM_ARCH_V3
,
26735 ARM_CPU_OPT ("arm7100", NULL
, ARM_ARCH_V3
,
26738 ARM_CPU_OPT ("arm7500", NULL
, ARM_ARCH_V3
,
26741 ARM_CPU_OPT ("arm7500fe", NULL
, ARM_ARCH_V3
,
26744 ARM_CPU_OPT ("arm7t", NULL
, ARM_ARCH_V4T
,
26747 ARM_CPU_OPT ("arm7tdmi", NULL
, ARM_ARCH_V4T
,
26750 ARM_CPU_OPT ("arm7tdmi-s", NULL
, ARM_ARCH_V4T
,
26753 ARM_CPU_OPT ("arm8", NULL
, ARM_ARCH_V4
,
26756 ARM_CPU_OPT ("arm810", NULL
, ARM_ARCH_V4
,
26759 ARM_CPU_OPT ("strongarm", NULL
, ARM_ARCH_V4
,
26762 ARM_CPU_OPT ("strongarm1", NULL
, ARM_ARCH_V4
,
26765 ARM_CPU_OPT ("strongarm110", NULL
, ARM_ARCH_V4
,
26768 ARM_CPU_OPT ("strongarm1100", NULL
, ARM_ARCH_V4
,
26771 ARM_CPU_OPT ("strongarm1110", NULL
, ARM_ARCH_V4
,
26774 ARM_CPU_OPT ("arm9", NULL
, ARM_ARCH_V4T
,
26777 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T
,
26780 ARM_CPU_OPT ("arm920t", NULL
, ARM_ARCH_V4T
,
26783 ARM_CPU_OPT ("arm922t", NULL
, ARM_ARCH_V4T
,
26786 ARM_CPU_OPT ("arm940t", NULL
, ARM_ARCH_V4T
,
26789 ARM_CPU_OPT ("arm9tdmi", NULL
, ARM_ARCH_V4T
,
26792 ARM_CPU_OPT ("fa526", NULL
, ARM_ARCH_V4
,
26795 ARM_CPU_OPT ("fa626", NULL
, ARM_ARCH_V4
,
26799 /* For V5 or later processors we default to using VFP; but the user
26800 should really set the FPU type explicitly. */
26801 ARM_CPU_OPT ("arm9e-r0", NULL
, ARM_ARCH_V5TExP
,
26804 ARM_CPU_OPT ("arm9e", NULL
, ARM_ARCH_V5TE
,
26807 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
26810 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ
,
26813 ARM_CPU_OPT ("arm926ej-s", NULL
, ARM_ARCH_V5TEJ
,
26816 ARM_CPU_OPT ("arm946e-r0", NULL
, ARM_ARCH_V5TExP
,
26819 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE
,
26822 ARM_CPU_OPT ("arm946e-s", NULL
, ARM_ARCH_V5TE
,
26825 ARM_CPU_OPT ("arm966e-r0", NULL
, ARM_ARCH_V5TExP
,
26828 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE
,
26831 ARM_CPU_OPT ("arm966e-s", NULL
, ARM_ARCH_V5TE
,
26834 ARM_CPU_OPT ("arm968e-s", NULL
, ARM_ARCH_V5TE
,
26837 ARM_CPU_OPT ("arm10t", NULL
, ARM_ARCH_V5T
,
26840 ARM_CPU_OPT ("arm10tdmi", NULL
, ARM_ARCH_V5T
,
26843 ARM_CPU_OPT ("arm10e", NULL
, ARM_ARCH_V5TE
,
26846 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE
,
26849 ARM_CPU_OPT ("arm1020t", NULL
, ARM_ARCH_V5T
,
26852 ARM_CPU_OPT ("arm1020e", NULL
, ARM_ARCH_V5TE
,
26855 ARM_CPU_OPT ("arm1022e", NULL
, ARM_ARCH_V5TE
,
26858 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ
,
26861 ARM_CPU_OPT ("arm1026ej-s", NULL
, ARM_ARCH_V5TEJ
,
26864 ARM_CPU_OPT ("fa606te", NULL
, ARM_ARCH_V5TE
,
26867 ARM_CPU_OPT ("fa616te", NULL
, ARM_ARCH_V5TE
,
26870 ARM_CPU_OPT ("fa626te", NULL
, ARM_ARCH_V5TE
,
26873 ARM_CPU_OPT ("fmp626", NULL
, ARM_ARCH_V5TE
,
26876 ARM_CPU_OPT ("fa726te", NULL
, ARM_ARCH_V5TE
,
26879 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6
,
26882 ARM_CPU_OPT ("arm1136j-s", NULL
, ARM_ARCH_V6
,
26885 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6
,
26888 ARM_CPU_OPT ("arm1136jf-s", NULL
, ARM_ARCH_V6
,
26891 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K
,
26894 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K
,
26897 ARM_CPU_OPT ("arm1156t2-s", NULL
, ARM_ARCH_V6T2
,
26900 ARM_CPU_OPT ("arm1156t2f-s", NULL
, ARM_ARCH_V6T2
,
26903 ARM_CPU_OPT ("arm1176jz-s", NULL
, ARM_ARCH_V6KZ
,
26906 ARM_CPU_OPT ("arm1176jzf-s", NULL
, ARM_ARCH_V6KZ
,
26909 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A
,
26910 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26912 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE
,
26914 FPU_ARCH_NEON_VFP_V4
),
26915 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A
,
26916 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
26917 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
26918 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A
,
26919 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
26920 ARM_FEATURE_COPROC (FPU_VFP_V3
| FPU_NEON_EXT_V1
)),
26921 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE
,
26923 FPU_ARCH_NEON_VFP_V4
),
26924 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE
,
26926 FPU_ARCH_NEON_VFP_V4
),
26927 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE
,
26929 FPU_ARCH_NEON_VFP_V4
),
26930 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A
,
26931 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26932 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26933 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A
,
26934 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26935 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26936 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A
,
26937 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26938 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26939 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A
,
26940 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26941 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26942 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A
,
26943 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26944 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26945 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A
,
26946 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26947 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26948 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A
,
26949 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26950 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
26951 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A
,
26952 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26953 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26954 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A
,
26955 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26956 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26957 ARM_CPU_OPT ("ares", "Ares", ARM_ARCH_V8_2A
,
26958 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
26959 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
26960 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R
,
26963 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R
,
26965 FPU_ARCH_VFP_V3D16
),
26966 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R
,
26967 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26969 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R
,
26970 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26971 FPU_ARCH_VFP_V3D16
),
26972 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R
,
26973 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
),
26974 FPU_ARCH_VFP_V3D16
),
26975 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R
,
26976 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
26977 FPU_ARCH_NEON_VFP_ARMV8
),
26978 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN
,
26979 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
26981 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE
,
26984 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM
,
26987 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM
,
26990 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M
,
26993 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM
,
26996 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM
,
26999 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM
,
27002 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A
,
27003 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27004 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27005 ARM_CPU_OPT ("neoverse-n1", "Neoverse N1", ARM_ARCH_V8_2A
,
27006 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27007 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD
),
27008 /* ??? XSCALE is really an architecture. */
27009 ARM_CPU_OPT ("xscale", NULL
, ARM_ARCH_XSCALE
,
27013 /* ??? iwmmxt is not a processor. */
27014 ARM_CPU_OPT ("iwmmxt", NULL
, ARM_ARCH_IWMMXT
,
27017 ARM_CPU_OPT ("iwmmxt2", NULL
, ARM_ARCH_IWMMXT2
,
27020 ARM_CPU_OPT ("i80200", NULL
, ARM_ARCH_XSCALE
,
27025 ARM_CPU_OPT ("ep9312", "ARM920T",
27026 ARM_FEATURE_LOW (ARM_AEXT_V4T
, ARM_CEXT_MAVERICK
),
27027 ARM_ARCH_NONE
, FPU_ARCH_MAVERICK
),
27029 /* Marvell processors. */
27030 ARM_CPU_OPT ("marvell-pj4", NULL
, ARM_ARCH_V7A
,
27031 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27032 FPU_ARCH_VFP_V3D16
),
27033 ARM_CPU_OPT ("marvell-whitney", NULL
, ARM_ARCH_V7A
,
27034 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
| ARM_EXT_SEC
),
27035 FPU_ARCH_NEON_VFP_V4
),
27037 /* APM X-Gene family. */
27038 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A
,
27040 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27041 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A
,
27042 ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27043 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
),
27045 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, ARM_ARCH_NONE
, NULL
}
27049 struct arm_ext_table
27053 const arm_feature_set merge
;
27054 const arm_feature_set clear
;
27057 struct arm_arch_option_table
27061 const arm_feature_set value
;
27062 const arm_feature_set default_fpu
;
27063 const struct arm_ext_table
* ext_table
;
/* Helpers for building arm_ext_table entries.  Each entry is
   { name, name_len, features-to-merge, features-to-clear }.
   Spacing normalized to GNU style (`sizeof (E) - 1') for consistency
   across the three definitions.  */

/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Feature mask covering every floating-point feature bit: FP16
   instructions plus all coprocessor FP bits except the FPU endianness
   flag.  Used as the "clear" set when removing FP support.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
/* Extensions accepted with -march=armv5te and related pre-ARMv7
   architectures: only an optional VFPv2 floating-point unit.  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv7: an optional VFPv3-D16 FPU.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv7ve: VFPv4 by default, with
   named aliases for the older VFPv3 variants and NEON.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv7-a: VFPv3-D16 by default, the
   full set of VFPv3/VFPv4/NEON aliases, plus the MP and Security
   (TrustZone) core extensions.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv7-r: single- or double-precision
   VFPv3 variants and the hardware ARM/Thumb divide instructions.  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv7e-m: single-precision FPv4 by
   default, with FPv5 single- and double-precision variants.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv8-a: CRC, Advanced SIMD,
   crypto, SB and PredRes.  Plain FP can only be removed, not added.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv8.1-a: as armv8-a but the SIMD
   and crypto units are the v8.1 (RDMA-capable) variants.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv8.2-a: adds FP16, FP16FML and
   dot-product on top of the v8.1 SIMD/crypto options.  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv8.4-a: dot-product is part of
   the base SIMD unit and crypto is the v8.4 (SHA2/SM) variant.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv8.5-a.  SB and PredRes are
   mandatory in v8.5 and therefore not listed as options here.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* Extensions accepted with -march=armv8-m.main: the DSP instructions
   and single- or double-precision FPv5.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
27244 static const struct arm_ext_table armv8_1m_main_ext_table
[] =
27246 ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27247 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
)),
27249 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27250 FPU_VFP_V5_SP_D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
),
27253 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27254 FPU_VFP_V5D16
| FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27255 ARM_EXT ("mve", ARM_FEATURE_COPROC (FPU_MVE
),
27256 ARM_FEATURE_COPROC (FPU_MVE
| FPU_MVE_FP
)),
27258 ARM_FEATURE (0, ARM_EXT2_FP16_INST
,
27259 FPU_MVE
| FPU_MVE_FP
| FPU_VFP_V5_SP_D16
|
27260 FPU_VFP_EXT_FP16
| FPU_VFP_EXT_FMA
)),
27261 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
}
/* Extensions accepted with -march=armv8-r: CRC, SIMD, crypto, and a
   single-precision-only FP option; full FP can only be removed.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARCH_CRC_ARMV8),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* Build an arm_arch_option_table entry with no per-arch extension
   table (NULL ext_table).  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
/* As ARM_ARCH_OPT, but wire up the architecture-specific extension
   table <ext>_ext_table for context-sensitive +ext parsing.  */
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
/* Table of architectures accepted by -march=, mapping each name to its
   feature set, the default FPU assumed for it, and (where available)
   the architecture-specific +extension table.  */
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP, armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP, armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN, FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP, armv82a),
  /* Note: armv8.3-a deliberately shares the armv82a extension table.  */
  ARM_ARCH_OPT2 ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2, FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }	/* Sentinel.  */
};
#undef ARM_ARCH_OPT
27347 /* ISA extensions in the co-processor and main instruction set space. */
27349 struct arm_option_extension_value_table
27353 const arm_feature_set merge_value
;
27354 const arm_feature_set clear_value
;
27355 /* List of architectures for which an extension is available. ARM_ARCH_NONE
27356 indicates that an extension is available for all architectures while
27357 ARM_ANY marks an empty entry. */
27358 const arm_feature_set allowed_archs
[2];
/* The following table must be in alphabetical order with a NULL last entry.  */
/* Build an arm_option_extension_value_table entry: AA (or AA1/AA2) are
   the architectures the extension is allowed for; ARM_ANY marks an
   unused allowed-archs slot.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
27366 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
27367 use the context sensitive approach using arm_ext_table's. */
27368 static const struct arm_option_extension_value_table arm_extensions
[] =
27370 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8
, ARM_FEATURE_COPROC (CRC_EXT_ARMV8
),
27371 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27372 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8
,
27373 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8
),
27374 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27375 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8
,
27376 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD
),
27378 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27379 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP
| ARM_EXT_V6_DSP
),
27380 ARM_FEATURE_CORE (ARM_EXT_V7M
, ARM_EXT2_V8M
)),
27381 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8
, ARM_FEATURE_COPROC (FPU_VFP_ARMV8
),
27382 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27383 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27384 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
),
27386 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27387 | ARM_EXT2_FP16_FML
),
27388 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
27389 | ARM_EXT2_FP16_FML
),
27391 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27392 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV
| ARM_EXT_DIV
),
27393 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27394 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27395 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
27396 Thumb divide instruction. Due to this having the same name as the
27397 previous entry, this will be ignored when doing command-line parsing and
27398 only considered by build attribute selection code. */
27399 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27400 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV
),
27401 ARM_FEATURE_CORE_LOW (ARM_EXT_V7
)),
27402 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
),
27403 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT
), ARM_ARCH_NONE
),
27404 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
),
27405 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2
), ARM_ARCH_NONE
),
27406 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
),
27407 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK
), ARM_ARCH_NONE
),
27408 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27409 ARM_FEATURE_CORE_LOW (ARM_EXT_MP
),
27410 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
),
27411 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R
)),
27412 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27413 ARM_FEATURE_CORE_LOW (ARM_EXT_OS
),
27414 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M
)),
27415 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN
),
27416 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_PAN
, 0),
27417 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27418 ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27419 ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES
),
27421 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS
),
27422 ARM_FEATURE (ARM_EXT_V8
, ARM_EXT2_RAS
, 0),
27423 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27424 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1
,
27425 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
| FPU_NEON_EXT_RDMA
),
27426 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A
)),
27427 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27428 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB
),
27430 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27431 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC
),
27432 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K
),
27433 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27434 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8
,
27435 ARM_FEATURE_COPROC (FPU_NEON_ARMV8
),
27436 ARM_FEATURE_CORE_LOW (ARM_EXT_V8
)),
27437 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
| ARM_EXT_ADIV
27439 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT
),
27440 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A
)),
27441 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
),
27442 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE
), ARM_ARCH_NONE
),
27443 { NULL
, 0, ARM_ARCH_NONE
, ARM_ARCH_NONE
, { ARM_ARCH_NONE
, ARM_ARCH_NONE
} }
27447 /* ISA floating-point and Advanced SIMD extensions. */
27448 struct arm_option_fpu_value_table
27451 const arm_feature_set value
;
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  */
/* Table of FPU names accepted by -mfpu=, mapping each name to the
   coprocessor feature set it enables.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}	/* Sentinel.  */
};
27505 struct arm_option_value_table
/* Table of float-ABI names accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
};
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
/* Table of EABI version names accepted by -meabi=, mapping each to the
   corresponding EF_ARM_EABI_* ELF header flag value.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
};
/* Descriptor for a long command-line option (e.g. -mcpu=...) whose
   argument is parsed by a dedicated callback.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.  */
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
27539 arm_parse_extension (const char *str
, const arm_feature_set
*opt_set
,
27540 arm_feature_set
*ext_set
,
27541 const struct arm_ext_table
*ext_table
)
27543 /* We insist on extensions being specified in alphabetical order, and with
27544 extensions being added before being removed. We achieve this by having
27545 the global ARM_EXTENSIONS table in alphabetical order, and using the
27546 ADDING_VALUE variable to indicate whether we are adding an extension (1)
27547 or removing it (0) and only allowing it to change in the order
27549 const struct arm_option_extension_value_table
* opt
= NULL
;
27550 const arm_feature_set arm_any
= ARM_ANY
;
27551 int adding_value
= -1;
27553 while (str
!= NULL
&& *str
!= 0)
27560 as_bad (_("invalid architectural extension"));
27565 ext
= strchr (str
, '+');
27570 len
= strlen (str
);
27572 if (len
>= 2 && strncmp (str
, "no", 2) == 0)
27574 if (adding_value
!= 0)
27577 opt
= arm_extensions
;
27585 if (adding_value
== -1)
27588 opt
= arm_extensions
;
27590 else if (adding_value
!= 1)
27592 as_bad (_("must specify extensions to add before specifying "
27593 "those to remove"));
27600 as_bad (_("missing architectural extension"));
27604 gas_assert (adding_value
!= -1);
27605 gas_assert (opt
!= NULL
);
27607 if (ext_table
!= NULL
)
27609 const struct arm_ext_table
* ext_opt
= ext_table
;
27610 bfd_boolean found
= FALSE
;
27611 for (; ext_opt
->name
!= NULL
; ext_opt
++)
27612 if (ext_opt
->name_len
== len
27613 && strncmp (ext_opt
->name
, str
, len
) == 0)
27617 if (ARM_FEATURE_ZERO (ext_opt
->merge
))
27618 /* TODO: Option not supported. When we remove the
27619 legacy table this case should error out. */
27622 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, ext_opt
->merge
);
27626 if (ARM_FEATURE_ZERO (ext_opt
->clear
))
27627 /* TODO: Option not supported. When we remove the
27628 legacy table this case should error out. */
27630 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, ext_opt
->clear
);
27642 /* Scan over the options table trying to find an exact match. */
27643 for (; opt
->name
!= NULL
; opt
++)
27644 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27646 int i
, nb_allowed_archs
=
27647 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
27648 /* Check we can apply the extension to this architecture. */
27649 for (i
= 0; i
< nb_allowed_archs
; i
++)
27652 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_any
))
27654 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *opt_set
))
27657 if (i
== nb_allowed_archs
)
27659 as_bad (_("extension does not apply to the base architecture"));
27663 /* Add or remove the extension. */
27665 ARM_MERGE_FEATURE_SETS (*ext_set
, *ext_set
, opt
->merge_value
);
27667 ARM_CLEAR_FEATURE (*ext_set
, *ext_set
, opt
->clear_value
);
27669 /* Allowing Thumb division instructions for ARMv7 in autodetection
27670 rely on this break so that duplicate extensions (extensions
27671 with the same name as a previous extension in the list) are not
27672 considered for command-line parsing. */
27676 if (opt
->name
== NULL
)
27678 /* Did we fail to find an extension because it wasn't specified in
27679 alphabetical order, or because it does not exist? */
27681 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
27682 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27685 if (opt
->name
== NULL
)
27686 as_bad (_("unknown architectural extension `%s'"), str
);
27688 as_bad (_("architectural extensions must be specified in "
27689 "alphabetical order"));
27695 /* We should skip the extension we've just matched the next time
27707 arm_parse_cpu (const char *str
)
27709 const struct arm_cpu_option_table
*opt
;
27710 const char *ext
= strchr (str
, '+');
27716 len
= strlen (str
);
27720 as_bad (_("missing cpu name `%s'"), str
);
27724 for (opt
= arm_cpus
; opt
->name
!= NULL
; opt
++)
27725 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27727 mcpu_cpu_opt
= &opt
->value
;
27728 if (mcpu_ext_opt
== NULL
)
27729 mcpu_ext_opt
= XNEW (arm_feature_set
);
27730 *mcpu_ext_opt
= opt
->ext
;
27731 mcpu_fpu_opt
= &opt
->default_fpu
;
27732 if (opt
->canonical_name
)
27734 gas_assert (sizeof selected_cpu_name
> strlen (opt
->canonical_name
));
27735 strcpy (selected_cpu_name
, opt
->canonical_name
);
27741 if (len
>= sizeof selected_cpu_name
)
27742 len
= (sizeof selected_cpu_name
) - 1;
27744 for (i
= 0; i
< len
; i
++)
27745 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
27746 selected_cpu_name
[i
] = 0;
27750 return arm_parse_extension (ext
, mcpu_cpu_opt
, mcpu_ext_opt
, NULL
);
27755 as_bad (_("unknown cpu `%s'"), str
);
27760 arm_parse_arch (const char *str
)
27762 const struct arm_arch_option_table
*opt
;
27763 const char *ext
= strchr (str
, '+');
27769 len
= strlen (str
);
27773 as_bad (_("missing architecture name `%s'"), str
);
27777 for (opt
= arm_archs
; opt
->name
!= NULL
; opt
++)
27778 if (opt
->name_len
== len
&& strncmp (opt
->name
, str
, len
) == 0)
27780 march_cpu_opt
= &opt
->value
;
27781 if (march_ext_opt
== NULL
)
27782 march_ext_opt
= XNEW (arm_feature_set
);
27783 *march_ext_opt
= arm_arch_none
;
27784 march_fpu_opt
= &opt
->default_fpu
;
27785 strcpy (selected_cpu_name
, opt
->name
);
27788 return arm_parse_extension (ext
, march_cpu_opt
, march_ext_opt
,
27794 as_bad (_("unknown architecture `%s'\n"), str
);
27799 arm_parse_fpu (const char * str
)
27801 const struct arm_option_fpu_value_table
* opt
;
27803 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
27804 if (streq (opt
->name
, str
))
27806 mfpu_opt
= &opt
->value
;
27810 as_bad (_("unknown floating point format `%s'\n"), str
);
27815 arm_parse_float_abi (const char * str
)
27817 const struct arm_option_value_table
* opt
;
27819 for (opt
= arm_float_abis
; opt
->name
!= NULL
; opt
++)
27820 if (streq (opt
->name
, str
))
27822 mfloat_abi_opt
= opt
->value
;
27826 as_bad (_("unknown floating point abi `%s'\n"), str
);
27832 arm_parse_eabi (const char * str
)
27834 const struct arm_option_value_table
*opt
;
27836 for (opt
= arm_eabis
; opt
->name
!= NULL
; opt
++)
27837 if (streq (opt
->name
, str
))
27839 meabi_flags
= opt
->value
;
27842 as_bad (_("unknown EABI `%s'\n"), str
);
27848 arm_parse_it_mode (const char * str
)
27850 bfd_boolean ret
= TRUE
;
27852 if (streq ("arm", str
))
27853 implicit_it_mode
= IMPLICIT_IT_MODE_ARM
;
27854 else if (streq ("thumb", str
))
27855 implicit_it_mode
= IMPLICIT_IT_MODE_THUMB
;
27856 else if (streq ("always", str
))
27857 implicit_it_mode
= IMPLICIT_IT_MODE_ALWAYS
;
27858 else if (streq ("never", str
))
27859 implicit_it_mode
= IMPLICIT_IT_MODE_NEVER
;
27862 as_bad (_("unknown implicit IT mode `%s', should be "\
27863 "arm, thumb, always, or never."), str
);
27871 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED
)
27873 codecomposer_syntax
= TRUE
;
27874 arm_comment_chars
[0] = ';';
27875 arm_line_separator_chars
[0] = 0;
27879 struct arm_long_option_table arm_long_opts
[] =
27881 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
27882 arm_parse_cpu
, NULL
},
27883 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
27884 arm_parse_arch
, NULL
},
27885 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
27886 arm_parse_fpu
, NULL
},
27887 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
27888 arm_parse_float_abi
, NULL
},
27890 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
27891 arm_parse_eabi
, NULL
},
27893 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
27894 arm_parse_it_mode
, NULL
},
27895 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
27896 arm_ccs_mode
, NULL
},
27897 {NULL
, NULL
, 0, NULL
}
27901 md_parse_option (int c
, const char * arg
)
27903 struct arm_option_table
*opt
;
27904 const struct arm_legacy_option_table
*fopt
;
27905 struct arm_long_option_table
*lopt
;
27911 target_big_endian
= 1;
27917 target_big_endian
= 0;
27921 case OPTION_FIX_V4BX
:
27929 #endif /* OBJ_ELF */
27932 /* Listing option. Just ignore these, we don't support additional
27937 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
27939 if (c
== opt
->option
[0]
27940 && ((arg
== NULL
&& opt
->option
[1] == 0)
27941 || streq (arg
, opt
->option
+ 1)))
27943 /* If the option is deprecated, tell the user. */
27944 if (warn_on_deprecated
&& opt
->deprecated
!= NULL
)
27945 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27946 arg
? arg
: "", _(opt
->deprecated
));
27948 if (opt
->var
!= NULL
)
27949 *opt
->var
= opt
->value
;
27955 for (fopt
= arm_legacy_opts
; fopt
->option
!= NULL
; fopt
++)
27957 if (c
== fopt
->option
[0]
27958 && ((arg
== NULL
&& fopt
->option
[1] == 0)
27959 || streq (arg
, fopt
->option
+ 1)))
27961 /* If the option is deprecated, tell the user. */
27962 if (warn_on_deprecated
&& fopt
->deprecated
!= NULL
)
27963 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
,
27964 arg
? arg
: "", _(fopt
->deprecated
));
27966 if (fopt
->var
!= NULL
)
27967 *fopt
->var
= &fopt
->value
;
27973 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
27975 /* These options are expected to have an argument. */
27976 if (c
== lopt
->option
[0]
27978 && strncmp (arg
, lopt
->option
+ 1,
27979 strlen (lopt
->option
+ 1)) == 0)
27981 /* If the option is deprecated, tell the user. */
27982 if (warn_on_deprecated
&& lopt
->deprecated
!= NULL
)
27983 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c
, arg
,
27984 _(lopt
->deprecated
));
27986 /* Call the sup-option parser. */
27987 return lopt
->func (arg
+ strlen (lopt
->option
) - 1);
27998 md_show_usage (FILE * fp
)
28000 struct arm_option_table
*opt
;
28001 struct arm_long_option_table
*lopt
;
28003 fprintf (fp
, _(" ARM-specific assembler options:\n"));
28005 for (opt
= arm_opts
; opt
->option
!= NULL
; opt
++)
28006 if (opt
->help
!= NULL
)
28007 fprintf (fp
, " -%-23s%s\n", opt
->option
, _(opt
->help
));
28009 for (lopt
= arm_long_opts
; lopt
->option
!= NULL
; lopt
++)
28010 if (lopt
->help
!= NULL
)
28011 fprintf (fp
, " -%s%s\n", lopt
->option
, _(lopt
->help
));
28015 -EB assemble code for a big-endian cpu\n"));
28020 -EL assemble code for a little-endian cpu\n"));
28024 --fix-v4bx Allow BX in ARMv4 code\n"));
28028 --fdpic generate an FDPIC object file\n"));
28029 #endif /* OBJ_ELF */
28037 arm_feature_set flags
;
28038 } cpu_arch_ver_table
;
28040 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
28041 chronologically for architectures, with an exception for ARMv6-M and
28042 ARMv6S-M due to legacy reasons. No new architecture should have a
28043 special case. This allows for build attribute selection results to be
28044 stable when new architectures are added. */
28045 static const cpu_arch_ver_table cpu_arch_ver
[] =
28047 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V1
},
28048 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2
},
28049 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V2S
},
28050 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3
},
28051 {TAG_CPU_ARCH_PRE_V4
, ARM_ARCH_V3M
},
28052 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4xM
},
28053 {TAG_CPU_ARCH_V4
, ARM_ARCH_V4
},
28054 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4TxM
},
28055 {TAG_CPU_ARCH_V4T
, ARM_ARCH_V4T
},
28056 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5xM
},
28057 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5
},
28058 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5TxM
},
28059 {TAG_CPU_ARCH_V5T
, ARM_ARCH_V5T
},
28060 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TExP
},
28061 {TAG_CPU_ARCH_V5TE
, ARM_ARCH_V5TE
},
28062 {TAG_CPU_ARCH_V5TEJ
, ARM_ARCH_V5TEJ
},
28063 {TAG_CPU_ARCH_V6
, ARM_ARCH_V6
},
28064 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6Z
},
28065 {TAG_CPU_ARCH_V6KZ
, ARM_ARCH_V6KZ
},
28066 {TAG_CPU_ARCH_V6K
, ARM_ARCH_V6K
},
28067 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6T2
},
28068 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KT2
},
28069 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6ZT2
},
28070 {TAG_CPU_ARCH_V6T2
, ARM_ARCH_V6KZT2
},
28072 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
28073 always selected build attributes to match those of ARMv6-M
28074 (resp. ARMv6S-M). However, due to these architectures being a strict
28075 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
28076 would be selected when fully respecting chronology of architectures.
28077 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
28078 move them before ARMv7 architectures. */
28079 {TAG_CPU_ARCH_V6_M
, ARM_ARCH_V6M
},
28080 {TAG_CPU_ARCH_V6S_M
, ARM_ARCH_V6SM
},
28082 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7
},
28083 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7A
},
28084 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7R
},
28085 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7M
},
28086 {TAG_CPU_ARCH_V7
, ARM_ARCH_V7VE
},
28087 {TAG_CPU_ARCH_V7E_M
, ARM_ARCH_V7EM
},
28088 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8A
},
28089 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_1A
},
28090 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_2A
},
28091 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_3A
},
28092 {TAG_CPU_ARCH_V8M_BASE
, ARM_ARCH_V8M_BASE
},
28093 {TAG_CPU_ARCH_V8M_MAIN
, ARM_ARCH_V8M_MAIN
},
28094 {TAG_CPU_ARCH_V8R
, ARM_ARCH_V8R
},
28095 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_4A
},
28096 {TAG_CPU_ARCH_V8
, ARM_ARCH_V8_5A
},
28097 {TAG_CPU_ARCH_V8_1M_MAIN
, ARM_ARCH_V8_1M_MAIN
},
28098 {-1, ARM_ARCH_NONE
}
28101 /* Set an attribute if it has not already been set by the user. */
28104 aeabi_set_attribute_int (int tag
, int value
)
28107 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28108 || !attributes_set_explicitly
[tag
])
28109 bfd_elf_add_proc_attr_int (stdoutput
, tag
, value
);
28113 aeabi_set_attribute_string (int tag
, const char *value
)
28116 || tag
>= NUM_KNOWN_OBJ_ATTRIBUTES
28117 || !attributes_set_explicitly
[tag
])
28118 bfd_elf_add_proc_attr_string (stdoutput
, tag
, value
);
28121 /* Return whether features in the *NEEDED feature set are available via
28122 extensions for the architecture whose feature set is *ARCH_FSET. */
28125 have_ext_for_needed_feat_p (const arm_feature_set
*arch_fset
,
28126 const arm_feature_set
*needed
)
28128 int i
, nb_allowed_archs
;
28129 arm_feature_set ext_fset
;
28130 const struct arm_option_extension_value_table
*opt
;
28132 ext_fset
= arm_arch_none
;
28133 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28135 /* Extension does not provide any feature we need. */
28136 if (!ARM_CPU_HAS_FEATURE (*needed
, opt
->merge_value
))
28140 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[0]);
28141 for (i
= 0; i
< nb_allowed_archs
; i
++)
28144 if (ARM_FEATURE_EQUAL (opt
->allowed_archs
[i
], arm_arch_any
))
28147 /* Extension is available, add it. */
28148 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], *arch_fset
))
28149 ARM_MERGE_FEATURE_SETS (ext_fset
, ext_fset
, opt
->merge_value
);
28153 /* Can we enable all features in *needed? */
28154 return ARM_FSET_CPU_SUBSET (*needed
, ext_fset
);
28157 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
28158 a given architecture feature set *ARCH_EXT_FSET including extension feature
28159 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
28160 - if true, check for an exact match of the architecture modulo extensions;
28161 - otherwise, select build attribute value of the first superset
28162 architecture released so that results remains stable when new architectures
28164 For -march/-mcpu=all the build attribute value of the most featureful
28165 architecture is returned. Tag_CPU_arch_profile result is returned in
28169 get_aeabi_cpu_arch_from_fset (const arm_feature_set
*arch_ext_fset
,
28170 const arm_feature_set
*ext_fset
,
28171 char *profile
, int exact_match
)
28173 arm_feature_set arch_fset
;
28174 const cpu_arch_ver_table
*p_ver
, *p_ver_ret
= NULL
;
28176 /* Select most featureful architecture with all its extensions if building
28177 for -march=all as the feature sets used to set build attributes. */
28178 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, arm_arch_any
))
28180 /* Force revisiting of decision for each new architecture. */
28181 gas_assert (MAX_TAG_CPU_ARCH
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28183 return TAG_CPU_ARCH_V8
;
28186 ARM_CLEAR_FEATURE (arch_fset
, *arch_ext_fset
, *ext_fset
);
28188 for (p_ver
= cpu_arch_ver
; p_ver
->val
!= -1; p_ver
++)
28190 arm_feature_set known_arch_fset
;
28192 ARM_CLEAR_FEATURE (known_arch_fset
, p_ver
->flags
, fpu_any
);
28195 /* Base architecture match user-specified architecture and
28196 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
28197 if (ARM_FEATURE_EQUAL (*arch_ext_fset
, known_arch_fset
))
28202 /* Base architecture match user-specified architecture only
28203 (eg. ARMv6-M in the same case as above). Record it in case we
28204 find a match with above condition. */
28205 else if (p_ver_ret
== NULL
28206 && ARM_FEATURE_EQUAL (arch_fset
, known_arch_fset
))
28212 /* Architecture has all features wanted. */
28213 if (ARM_FSET_CPU_SUBSET (arch_fset
, known_arch_fset
))
28215 arm_feature_set added_fset
;
28217 /* Compute features added by this architecture over the one
28218 recorded in p_ver_ret. */
28219 if (p_ver_ret
!= NULL
)
28220 ARM_CLEAR_FEATURE (added_fset
, known_arch_fset
,
28222 /* First architecture that match incl. with extensions, or the
28223 only difference in features over the recorded match is
28224 features that were optional and are now mandatory. */
28225 if (p_ver_ret
== NULL
28226 || ARM_FSET_CPU_SUBSET (added_fset
, arch_fset
))
28232 else if (p_ver_ret
== NULL
)
28234 arm_feature_set needed_ext_fset
;
28236 ARM_CLEAR_FEATURE (needed_ext_fset
, arch_fset
, known_arch_fset
);
28238 /* Architecture has all features needed when using some
28239 extensions. Record it and continue searching in case there
28240 exist an architecture providing all needed features without
28241 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
28243 if (have_ext_for_needed_feat_p (&known_arch_fset
,
28250 if (p_ver_ret
== NULL
)
28254 /* Tag_CPU_arch_profile. */
28255 if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7a
)
28256 || ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8
)
28257 || (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_atomics
)
28258 && !ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v8m_m_only
)))
28260 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_v7r
))
28262 else if (ARM_CPU_HAS_FEATURE (p_ver_ret
->flags
, arm_ext_m
))
28266 return p_ver_ret
->val
;
28269 /* Set the public EABI object attributes. */
28272 aeabi_set_public_attributes (void)
28274 char profile
= '\0';
28277 int fp16_optional
= 0;
28278 int skip_exact_match
= 0;
28279 arm_feature_set flags
, flags_arch
, flags_ext
;
28281 /* Autodetection mode, choose the architecture based the instructions
28283 if (no_cpu_selected ())
28285 ARM_MERGE_FEATURE_SETS (flags
, arm_arch_used
, thumb_arch_used
);
28287 if (ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
))
28288 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v1
);
28290 if (ARM_CPU_HAS_FEATURE (thumb_arch_used
, arm_arch_any
))
28291 ARM_MERGE_FEATURE_SETS (flags
, flags
, arm_ext_v4t
);
28293 /* Code run during relaxation relies on selected_cpu being set. */
28294 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28295 flags_ext
= arm_arch_none
;
28296 ARM_CLEAR_FEATURE (selected_arch
, flags_arch
, flags_ext
);
28297 selected_ext
= flags_ext
;
28298 selected_cpu
= flags
;
28300 /* Otherwise, choose the architecture based on the capabilities of the
28304 ARM_MERGE_FEATURE_SETS (flags_arch
, selected_arch
, selected_ext
);
28305 ARM_CLEAR_FEATURE (flags_arch
, flags_arch
, fpu_any
);
28306 flags_ext
= selected_ext
;
28307 flags
= selected_cpu
;
28309 ARM_MERGE_FEATURE_SETS (flags
, flags
, selected_fpu
);
28311 /* Allow the user to override the reported architecture. */
28312 if (!ARM_FEATURE_ZERO (selected_object_arch
))
28314 ARM_CLEAR_FEATURE (flags_arch
, selected_object_arch
, fpu_any
);
28315 flags_ext
= arm_arch_none
;
28318 skip_exact_match
= ARM_FEATURE_EQUAL (selected_cpu
, arm_arch_any
);
28320 /* When this function is run again after relaxation has happened there is no
28321 way to determine whether an architecture or CPU was specified by the user:
28322 - selected_cpu is set above for relaxation to work;
28323 - march_cpu_opt is not set if only -mcpu or .cpu is used;
28324 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
28325 Therefore, if not in -march=all case we first try an exact match and fall
28326 back to autodetection. */
28327 if (!skip_exact_match
)
28328 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 1);
28330 arch
= get_aeabi_cpu_arch_from_fset (&flags_arch
, &flags_ext
, &profile
, 0);
28332 as_bad (_("no architecture contains all the instructions used\n"));
28334 /* Tag_CPU_name. */
28335 if (selected_cpu_name
[0])
28339 q
= selected_cpu_name
;
28340 if (strncmp (q
, "armv", 4) == 0)
28345 for (i
= 0; q
[i
]; i
++)
28346 q
[i
] = TOUPPER (q
[i
]);
28348 aeabi_set_attribute_string (Tag_CPU_name
, q
);
28351 /* Tag_CPU_arch. */
28352 aeabi_set_attribute_int (Tag_CPU_arch
, arch
);
28354 /* Tag_CPU_arch_profile. */
28355 if (profile
!= '\0')
28356 aeabi_set_attribute_int (Tag_CPU_arch_profile
, profile
);
28358 /* Tag_DSP_extension. */
28359 if (ARM_CPU_HAS_FEATURE (selected_ext
, arm_ext_dsp
))
28360 aeabi_set_attribute_int (Tag_DSP_extension
, 1);
28362 ARM_CLEAR_FEATURE (flags_arch
, flags
, fpu_any
);
28363 /* Tag_ARM_ISA_use. */
28364 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v1
)
28365 || ARM_FEATURE_ZERO (flags_arch
))
28366 aeabi_set_attribute_int (Tag_ARM_ISA_use
, 1);
28368 /* Tag_THUMB_ISA_use. */
28369 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v4t
)
28370 || ARM_FEATURE_ZERO (flags_arch
))
28374 if (!ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28375 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m_m_only
))
28377 else if (ARM_CPU_HAS_FEATURE (flags
, arm_arch_t2
))
28381 aeabi_set_attribute_int (Tag_THUMB_ISA_use
, thumb_isa_use
);
28384 /* Tag_VFP_arch. */
28385 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_armv8xd
))
28386 aeabi_set_attribute_int (Tag_VFP_arch
,
28387 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28389 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_fma
))
28390 aeabi_set_attribute_int (Tag_VFP_arch
,
28391 ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
)
28393 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_d32
))
28396 aeabi_set_attribute_int (Tag_VFP_arch
, 3);
28398 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v3xd
))
28400 aeabi_set_attribute_int (Tag_VFP_arch
, 4);
28403 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v2
))
28404 aeabi_set_attribute_int (Tag_VFP_arch
, 2);
28405 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
)
28406 || ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
))
28407 aeabi_set_attribute_int (Tag_VFP_arch
, 1);
28409 /* Tag_ABI_HardFP_use. */
28410 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1xd
)
28411 && !ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_ext_v1
))
28412 aeabi_set_attribute_int (Tag_ABI_HardFP_use
, 1);
28414 /* Tag_WMMX_arch. */
28415 if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt2
))
28416 aeabi_set_attribute_int (Tag_WMMX_arch
, 2);
28417 else if (ARM_CPU_HAS_FEATURE (flags
, arm_cext_iwmmxt
))
28418 aeabi_set_attribute_int (Tag_WMMX_arch
, 1);
28420 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
28421 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v8_1
))
28422 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 4);
28423 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_armv8
))
28424 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 3);
28425 else if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_v1
))
28427 if (ARM_CPU_HAS_FEATURE (flags
, fpu_neon_ext_fma
))
28429 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 2);
28433 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch
, 1);
28438 if (ARM_CPU_HAS_FEATURE (flags
, mve_fp_ext
))
28439 aeabi_set_attribute_int (Tag_MVE_arch
, 2);
28440 else if (ARM_CPU_HAS_FEATURE (flags
, mve_ext
))
28441 aeabi_set_attribute_int (Tag_MVE_arch
, 1);
28443 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
28444 if (ARM_CPU_HAS_FEATURE (flags
, fpu_vfp_fp16
) && fp16_optional
)
28445 aeabi_set_attribute_int (Tag_VFP_HP_extension
, 1);
28449 We set Tag_DIV_use to two when integer divide instructions have been used
28450 in ARM state, or when Thumb integer divide instructions have been used,
28451 but we have no architecture profile set, nor have we any ARM instructions.
28453 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
28454 by the base architecture.
28456 For new architectures we will have to check these tests. */
28457 gas_assert (arch
<= TAG_CPU_ARCH_V8_1M_MAIN
);
28458 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8
)
28459 || ARM_CPU_HAS_FEATURE (flags
, arm_ext_v8m
))
28460 aeabi_set_attribute_int (Tag_DIV_use
, 0);
28461 else if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_adiv
)
28462 || (profile
== '\0'
28463 && ARM_CPU_HAS_FEATURE (flags
, arm_ext_div
)
28464 && !ARM_CPU_HAS_FEATURE (arm_arch_used
, arm_arch_any
)))
28465 aeabi_set_attribute_int (Tag_DIV_use
, 2);
28467 /* Tag_MP_extension_use. */
28468 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_mp
))
28469 aeabi_set_attribute_int (Tag_MPextension_use
, 1);
28471 /* Tag Virtualization_use. */
28472 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_sec
))
28474 if (ARM_CPU_HAS_FEATURE (flags
, arm_ext_virt
))
28477 aeabi_set_attribute_int (Tag_Virtualization_use
, virt_sec
);
28480 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
28481 finished and free extension feature bits which will not be used anymore. */
28484 arm_md_post_relax (void)
28486 aeabi_set_public_attributes ();
28487 XDELETE (mcpu_ext_opt
);
28488 mcpu_ext_opt
= NULL
;
28489 XDELETE (march_ext_opt
);
28490 march_ext_opt
= NULL
;
28493 /* Add the default contents for the .ARM.attributes section. */
28498 if (EF_ARM_EABI_VERSION (meabi_flags
) < EF_ARM_EABI_VER4
)
28501 aeabi_set_public_attributes ();
28503 #endif /* OBJ_ELF */
28505 /* Parse a .cpu directive. */
28508 s_arm_cpu (int ignored ATTRIBUTE_UNUSED
)
28510 const struct arm_cpu_option_table
*opt
;
28514 name
= input_line_pointer
;
28515 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28516 input_line_pointer
++;
28517 saved_char
= *input_line_pointer
;
28518 *input_line_pointer
= 0;
28520 /* Skip the first "all" entry. */
28521 for (opt
= arm_cpus
+ 1; opt
->name
!= NULL
; opt
++)
28522 if (streq (opt
->name
, name
))
28524 selected_arch
= opt
->value
;
28525 selected_ext
= opt
->ext
;
28526 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28527 if (opt
->canonical_name
)
28528 strcpy (selected_cpu_name
, opt
->canonical_name
);
28532 for (i
= 0; opt
->name
[i
]; i
++)
28533 selected_cpu_name
[i
] = TOUPPER (opt
->name
[i
]);
28535 selected_cpu_name
[i
] = 0;
28537 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28539 *input_line_pointer
= saved_char
;
28540 demand_empty_rest_of_line ();
28543 as_bad (_("unknown cpu `%s'"), name
);
28544 *input_line_pointer
= saved_char
;
28545 ignore_rest_of_line ();
28548 /* Parse a .arch directive. */
28551 s_arm_arch (int ignored ATTRIBUTE_UNUSED
)
28553 const struct arm_arch_option_table
*opt
;
28557 name
= input_line_pointer
;
28558 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28559 input_line_pointer
++;
28560 saved_char
= *input_line_pointer
;
28561 *input_line_pointer
= 0;
28563 /* Skip the first "all" entry. */
28564 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
28565 if (streq (opt
->name
, name
))
28567 selected_arch
= opt
->value
;
28568 selected_ext
= arm_arch_none
;
28569 selected_cpu
= selected_arch
;
28570 strcpy (selected_cpu_name
, opt
->name
);
28571 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28572 *input_line_pointer
= saved_char
;
28573 demand_empty_rest_of_line ();
28577 as_bad (_("unknown architecture `%s'\n"), name
);
28578 *input_line_pointer
= saved_char
;
28579 ignore_rest_of_line ();
28582 /* Parse a .object_arch directive. */
28585 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED
)
28587 const struct arm_arch_option_table
*opt
;
28591 name
= input_line_pointer
;
28592 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28593 input_line_pointer
++;
28594 saved_char
= *input_line_pointer
;
28595 *input_line_pointer
= 0;
28597 /* Skip the first "all" entry. */
28598 for (opt
= arm_archs
+ 1; opt
->name
!= NULL
; opt
++)
28599 if (streq (opt
->name
, name
))
28601 selected_object_arch
= opt
->value
;
28602 *input_line_pointer
= saved_char
;
28603 demand_empty_rest_of_line ();
28607 as_bad (_("unknown architecture `%s'\n"), name
);
28608 *input_line_pointer
= saved_char
;
28609 ignore_rest_of_line ();
28612 /* Parse a .arch_extension directive. */
28615 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED
)
28617 const struct arm_option_extension_value_table
*opt
;
28620 int adding_value
= 1;
28622 name
= input_line_pointer
;
28623 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28624 input_line_pointer
++;
28625 saved_char
= *input_line_pointer
;
28626 *input_line_pointer
= 0;
28628 if (strlen (name
) >= 2
28629 && strncmp (name
, "no", 2) == 0)
28635 for (opt
= arm_extensions
; opt
->name
!= NULL
; opt
++)
28636 if (streq (opt
->name
, name
))
28638 int i
, nb_allowed_archs
=
28639 sizeof (opt
->allowed_archs
) / sizeof (opt
->allowed_archs
[i
]);
28640 for (i
= 0; i
< nb_allowed_archs
; i
++)
28643 if (ARM_CPU_IS_ANY (opt
->allowed_archs
[i
]))
28645 if (ARM_FSET_CPU_SUBSET (opt
->allowed_archs
[i
], selected_arch
))
28649 if (i
== nb_allowed_archs
)
28651 as_bad (_("architectural extension `%s' is not allowed for the "
28652 "current base architecture"), name
);
28657 ARM_MERGE_FEATURE_SETS (selected_ext
, selected_ext
,
28660 ARM_CLEAR_FEATURE (selected_ext
, selected_ext
, opt
->clear_value
);
28662 ARM_MERGE_FEATURE_SETS (selected_cpu
, selected_arch
, selected_ext
);
28663 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28664 *input_line_pointer
= saved_char
;
28665 demand_empty_rest_of_line ();
28666 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
28667 on this return so that duplicate extensions (extensions with the
28668 same name as a previous extension in the list) are not considered
28669 for command-line parsing. */
28673 if (opt
->name
== NULL
)
28674 as_bad (_("unknown architecture extension `%s'\n"), name
);
28676 *input_line_pointer
= saved_char
;
28677 ignore_rest_of_line ();
28680 /* Parse a .fpu directive. */
28683 s_arm_fpu (int ignored ATTRIBUTE_UNUSED
)
28685 const struct arm_option_fpu_value_table
*opt
;
28689 name
= input_line_pointer
;
28690 while (*input_line_pointer
&& !ISSPACE (*input_line_pointer
))
28691 input_line_pointer
++;
28692 saved_char
= *input_line_pointer
;
28693 *input_line_pointer
= 0;
28695 for (opt
= arm_fpus
; opt
->name
!= NULL
; opt
++)
28696 if (streq (opt
->name
, name
))
28698 selected_fpu
= opt
->value
;
28699 #ifndef CPU_DEFAULT
28700 if (no_cpu_selected ())
28701 ARM_MERGE_FEATURE_SETS (cpu_variant
, arm_arch_any
, selected_fpu
);
28704 ARM_MERGE_FEATURE_SETS (cpu_variant
, selected_cpu
, selected_fpu
);
28705 *input_line_pointer
= saved_char
;
28706 demand_empty_rest_of_line ();
28710 as_bad (_("unknown floating point format `%s'\n"), name
);
28711 *input_line_pointer
= saved_char
;
28712 ignore_rest_of_line ();
28715 /* Copy symbol information. */
28718 arm_copy_symbol_attributes (symbolS
*dest
, symbolS
*src
)
28720 ARM_GET_FLAG (dest
) = ARM_GET_FLAG (src
);
28724 /* Given a symbolic attribute NAME, return the proper integer value.
28725 Returns -1 if the attribute is not known. */
28728 arm_convert_symbolic_attribute (const char *name
)
28730 static const struct
28735 attribute_table
[] =
28737 /* When you modify this table you should
28738 also modify the list in doc/c-arm.texi. */
28739 #define T(tag) {#tag, tag}
28740 T (Tag_CPU_raw_name
),
28743 T (Tag_CPU_arch_profile
),
28744 T (Tag_ARM_ISA_use
),
28745 T (Tag_THUMB_ISA_use
),
28749 T (Tag_Advanced_SIMD_arch
),
28750 T (Tag_PCS_config
),
28751 T (Tag_ABI_PCS_R9_use
),
28752 T (Tag_ABI_PCS_RW_data
),
28753 T (Tag_ABI_PCS_RO_data
),
28754 T (Tag_ABI_PCS_GOT_use
),
28755 T (Tag_ABI_PCS_wchar_t
),
28756 T (Tag_ABI_FP_rounding
),
28757 T (Tag_ABI_FP_denormal
),
28758 T (Tag_ABI_FP_exceptions
),
28759 T (Tag_ABI_FP_user_exceptions
),
28760 T (Tag_ABI_FP_number_model
),
28761 T (Tag_ABI_align_needed
),
28762 T (Tag_ABI_align8_needed
),
28763 T (Tag_ABI_align_preserved
),
28764 T (Tag_ABI_align8_preserved
),
28765 T (Tag_ABI_enum_size
),
28766 T (Tag_ABI_HardFP_use
),
28767 T (Tag_ABI_VFP_args
),
28768 T (Tag_ABI_WMMX_args
),
28769 T (Tag_ABI_optimization_goals
),
28770 T (Tag_ABI_FP_optimization_goals
),
28771 T (Tag_compatibility
),
28772 T (Tag_CPU_unaligned_access
),
28773 T (Tag_FP_HP_extension
),
28774 T (Tag_VFP_HP_extension
),
28775 T (Tag_ABI_FP_16bit_format
),
28776 T (Tag_MPextension_use
),
28778 T (Tag_nodefaults
),
28779 T (Tag_also_compatible_with
),
28780 T (Tag_conformance
),
28782 T (Tag_Virtualization_use
),
28783 T (Tag_DSP_extension
),
28785 /* We deliberately do not include Tag_MPextension_use_legacy. */
28793 for (i
= 0; i
< ARRAY_SIZE (attribute_table
); i
++)
28794 if (streq (name
, attribute_table
[i
].name
))
28795 return attribute_table
[i
].tag
;
28800 /* Apply sym value for relocations only in the case that they are for
28801 local symbols in the same segment as the fixup and you have the
28802 respective architectural feature for blx and simple switches. */
28805 arm_apply_sym_value (struct fix
* fixP
, segT this_seg
)
28808 && ARM_CPU_HAS_FEATURE (selected_cpu
, arm_ext_v5t
)
28809 /* PR 17444: If the local symbol is in a different section then a reloc
28810 will always be generated for it, so applying the symbol value now
28811 will result in a double offset being stored in the relocation. */
28812 && (S_GET_SEGMENT (fixP
->fx_addsy
) == this_seg
)
28813 && !S_FORCE_RELOC (fixP
->fx_addsy
, TRUE
))
28815 switch (fixP
->fx_r_type
)
28817 case BFD_RELOC_ARM_PCREL_BLX
:
28818 case BFD_RELOC_THUMB_PCREL_BRANCH23
:
28819 if (ARM_IS_FUNC (fixP
->fx_addsy
))
28823 case BFD_RELOC_ARM_PCREL_CALL
:
28824 case BFD_RELOC_THUMB_PCREL_BLX
:
28825 if (THUMB_IS_FUNC (fixP
->fx_addsy
))
28836 #endif /* OBJ_ELF */