1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
5 and Martin Simmons (@harleqn.co.uk).
6 More major hacks by Richard Earnshaw (rearnsha@arm.com).
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published
12 by the Free Software Foundation; either version 2, or (at your
13 option) any later version.
15 GCC is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
18 License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING. If not, write to
22 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
23 Boston, MA 02110-1301, USA. */
27 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
38 #include "insn-attr.h"
49 #include "integrate.h"
52 #include "target-def.h"
54 #include "langhooks.h"
56 /* Forward definitions of types. */
57 typedef struct minipool_node Mnode;
58 typedef struct minipool_fixup Mfix;
60 const struct attribute_spec arm_attribute_table[];
62 /* Forward function declarations. */
63 static arm_stack_offsets *arm_get_frame_offsets (void);
64 static void arm_add_gc_roots (void);
65 static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
66 HOST_WIDE_INT, rtx, rtx, int, int);
67 static unsigned bit_count (unsigned long);
68 static int arm_address_register_rtx_p (rtx, int);
69 static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
70 static int thumb_base_register_rtx_p (rtx, enum machine_mode, int);
71 inline static int thumb_index_register_rtx_p (rtx, int);
72 static int thumb_far_jump_used_p (void);
73 static bool thumb_force_lr_save (void);
74 static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
75 static rtx emit_sfm (int, int);
76 static int arm_size_return_regs (void);
78 static bool arm_assemble_integer (rtx, unsigned int, int);
80 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
81 static arm_cc get_arm_condition_code (rtx);
82 static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
83 static rtx is_jump_table (rtx);
84 static const char *output_multi_immediate (rtx *, const char *, const char *,
86 static const char *shift_op (rtx, HOST_WIDE_INT *);
87 static struct machine_function *arm_init_machine_status (void);
88 static void thumb_exit (FILE *, int);
89 static rtx is_jump_table (rtx);
90 static HOST_WIDE_INT get_jump_table_size (rtx);
91 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
92 static Mnode *add_minipool_forward_ref (Mfix *);
93 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
94 static Mnode *add_minipool_backward_ref (Mfix *);
95 static void assign_minipool_offsets (Mfix *);
96 static void arm_print_value (FILE *, rtx);
97 static void dump_minipool (rtx);
98 static int arm_barrier_cost (rtx);
99 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
100 static void push_minipool_barrier (rtx, HOST_WIDE_INT);
101 static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
103 static void arm_reorg (void);
104 static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
105 static int current_file_function_operand (rtx);
106 static unsigned long arm_compute_save_reg0_reg12_mask (void);
107 static unsigned long arm_compute_save_reg_mask (void);
108 static unsigned long arm_isr_value (tree);
109 static unsigned long arm_compute_func_type (void);
110 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
111 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
112 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
113 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
115 static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
116 static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
117 static void thumb_output_function_prologue (FILE *, HOST_WIDE_INT);
118 static int arm_comp_type_attributes (tree, tree);
119 static void arm_set_default_type_attributes (tree);
120 static int arm_adjust_cost (rtx, rtx, rtx, int);
121 static int count_insns_for_constant (HOST_WIDE_INT, int);
122 static int arm_get_strip_length (int);
123 static bool arm_function_ok_for_sibcall (tree, tree);
124 static void arm_internal_label (FILE *, const char *, unsigned long);
125 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
127 static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
128 static bool arm_size_rtx_costs (rtx, int, int, int *);
129 static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
130 static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
131 static bool arm_xscale_rtx_costs (rtx, int, int, int *);
132 static bool arm_9e_rtx_costs (rtx, int, int, int *);
133 static int arm_address_cost (rtx);
134 static bool arm_memory_load_p (rtx);
135 static bool arm_cirrus_insn_p (rtx);
136 static void cirrus_reorg (rtx);
137 static void arm_init_builtins (void);
138 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
139 static void arm_init_iwmmxt_builtins (void);
140 static rtx safe_vector_operand (rtx, enum machine_mode);
141 static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
142 static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
143 static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
144 static void emit_constant_insn (rtx cond, rtx pattern);
145 static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
148 #ifdef OBJECT_FORMAT_ELF
149 static void arm_elf_asm_constructor (rtx, int);
152 static void arm_encode_section_info (tree, rtx, int);
155 static void arm_file_end (void);
158 static void aof_globalize_label (FILE *, const char *);
159 static void aof_dump_imports (FILE *);
160 static void aof_dump_pic_table (FILE *);
161 static void aof_file_start (void);
162 static void aof_file_end (void);
164 static rtx arm_struct_value_rtx (tree, int);
165 static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
167 static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
168 enum machine_mode, tree, bool);
169 static bool arm_promote_prototypes (tree);
170 static bool arm_default_short_enums (void);
171 static bool arm_align_anon_bitfield (void);
172 static bool arm_return_in_msb (tree);
173 static bool arm_must_pass_in_stack (enum machine_mode, tree);
174 #ifdef TARGET_UNWIND_INFO
175 static void arm_unwind_emit (FILE *, rtx);
176 static bool arm_output_ttype (rtx);
179 static tree arm_cxx_guard_type (void);
180 static bool arm_cxx_guard_mask_bit (void);
181 static tree arm_get_cookie_size (tree);
182 static bool arm_cookie_has_size (void);
183 static bool arm_cxx_cdtor_returns_this (void);
184 static bool arm_cxx_key_method_may_be_inline (void);
185 static void arm_cxx_determine_class_data_visibility (tree);
186 static bool arm_cxx_class_data_always_comdat (void);
187 static bool arm_cxx_use_aeabi_atexit (void);
188 static void arm_init_libfuncs (void);
189 static bool arm_handle_option (size_t, const char *, int);
190 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
191 static bool arm_cannot_copy_insn_p (rtx);
192 static bool arm_tls_symbol_p (rtx x);
195 /* Initialize the GCC target structure. */
196 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
197 #undef TARGET_MERGE_DECL_ATTRIBUTES
198 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
201 #undef TARGET_ATTRIBUTE_TABLE
202 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
204 #undef TARGET_ASM_FILE_END
205 #define TARGET_ASM_FILE_END arm_file_end
208 #undef TARGET_ASM_BYTE_OP
209 #define TARGET_ASM_BYTE_OP "\tDCB\t"
210 #undef TARGET_ASM_ALIGNED_HI_OP
211 #define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
212 #undef TARGET_ASM_ALIGNED_SI_OP
213 #define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
214 #undef TARGET_ASM_GLOBALIZE_LABEL
215 #define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
216 #undef TARGET_ASM_FILE_START
217 #define TARGET_ASM_FILE_START aof_file_start
218 #undef TARGET_ASM_FILE_END
219 #define TARGET_ASM_FILE_END aof_file_end
221 #undef TARGET_ASM_ALIGNED_SI_OP
222 #define TARGET_ASM_ALIGNED_SI_OP NULL
223 #undef TARGET_ASM_INTEGER
224 #define TARGET_ASM_INTEGER arm_assemble_integer
227 #undef TARGET_ASM_FUNCTION_PROLOGUE
228 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
230 #undef TARGET_ASM_FUNCTION_EPILOGUE
231 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
233 #undef TARGET_DEFAULT_TARGET_FLAGS
234 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
235 #undef TARGET_HANDLE_OPTION
236 #define TARGET_HANDLE_OPTION arm_handle_option
238 #undef TARGET_COMP_TYPE_ATTRIBUTES
239 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
241 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
242 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
244 #undef TARGET_SCHED_ADJUST_COST
245 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
247 #undef TARGET_ENCODE_SECTION_INFO
249 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
251 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
254 #undef TARGET_STRIP_NAME_ENCODING
255 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
257 #undef TARGET_ASM_INTERNAL_LABEL
258 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
260 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
261 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
263 #undef TARGET_ASM_OUTPUT_MI_THUNK
264 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
265 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
266 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
268 /* This will be overridden in arm_override_options. */
269 #undef TARGET_RTX_COSTS
270 #define TARGET_RTX_COSTS arm_slowmul_rtx_costs
271 #undef TARGET_ADDRESS_COST
272 #define TARGET_ADDRESS_COST arm_address_cost
274 #undef TARGET_SHIFT_TRUNCATION_MASK
275 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
276 #undef TARGET_VECTOR_MODE_SUPPORTED_P
277 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
279 #undef TARGET_MACHINE_DEPENDENT_REORG
280 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
282 #undef TARGET_INIT_BUILTINS
283 #define TARGET_INIT_BUILTINS arm_init_builtins
284 #undef TARGET_EXPAND_BUILTIN
285 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
287 #undef TARGET_INIT_LIBFUNCS
288 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
290 #undef TARGET_PROMOTE_FUNCTION_ARGS
291 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
292 #undef TARGET_PROMOTE_FUNCTION_RETURN
293 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
294 #undef TARGET_PROMOTE_PROTOTYPES
295 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
296 #undef TARGET_PASS_BY_REFERENCE
297 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
298 #undef TARGET_ARG_PARTIAL_BYTES
299 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
301 #undef TARGET_STRUCT_VALUE_RTX
302 #define TARGET_STRUCT_VALUE_RTX arm_struct_value_rtx
304 #undef TARGET_SETUP_INCOMING_VARARGS
305 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
307 #undef TARGET_DEFAULT_SHORT_ENUMS
308 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
310 #undef TARGET_ALIGN_ANON_BITFIELD
311 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
313 #undef TARGET_CXX_GUARD_TYPE
314 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
316 #undef TARGET_CXX_GUARD_MASK_BIT
317 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
319 #undef TARGET_CXX_GET_COOKIE_SIZE
320 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
322 #undef TARGET_CXX_COOKIE_HAS_SIZE
323 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
325 #undef TARGET_CXX_CDTOR_RETURNS_THIS
326 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
328 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
329 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
331 #undef TARGET_CXX_USE_AEABI_ATEXIT
332 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
334 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
335 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
336 arm_cxx_determine_class_data_visibility
338 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
339 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
341 #undef TARGET_RETURN_IN_MSB
342 #define TARGET_RETURN_IN_MSB arm_return_in_msb
344 #undef TARGET_MUST_PASS_IN_STACK
345 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
347 #ifdef TARGET_UNWIND_INFO
348 #undef TARGET_UNWIND_EMIT
349 #define TARGET_UNWIND_EMIT arm_unwind_emit
351 /* EABI unwinding tables use a different format for the typeinfo tables. */
352 #undef TARGET_ASM_TTYPE
353 #define TARGET_ASM_TTYPE arm_output_ttype
355 #undef TARGET_ARM_EABI_UNWINDER
356 #define TARGET_ARM_EABI_UNWINDER true
357 #endif /* TARGET_UNWIND_INFO */
359 #undef TARGET_CANNOT_COPY_INSN_P
360 #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
363 #undef TARGET_HAVE_TLS
364 #define TARGET_HAVE_TLS true
367 #undef TARGET_CANNOT_FORCE_CONST_MEM
368 #define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p
370 struct gcc_target targetm = TARGET_INITIALIZER;
372 /* Obstack for minipool constant handling. */
373 static struct obstack minipool_obstack;
374 static char * minipool_startobj;
376 /* The maximum number of insns skipped which
377 will be conditionalised if possible. */
378 static int max_insns_skipped = 5;
380 extern FILE * asm_out_file;
382 /* True if we are currently building a constant table. */
383 int making_const_table;
385 /* Define the information needed to generate branch insns. This is
386 stored from the compare operation. */
387 rtx arm_compare_op0, arm_compare_op1;
389 /* The processor for which instructions should be scheduled. */
390 enum processor_type arm_tune = arm_none;
392 /* Which floating point model to use. */
393 enum arm_fp_model arm_fp_model;
395 /* Which floating point hardware is available. */
396 enum fputype arm_fpu_arch;
398 /* Which floating point hardware to schedule for. */
399 enum fputype arm_fpu_tune;
401 /* Whether to use floating point hardware. */
402 enum float_abi_type arm_float_abi;
404 /* Which ABI to use. */
405 enum arm_abi_type arm_abi;
407 /* Which thread pointer model to use. */
408 enum arm_tp_type target_thread_pointer = TP_AUTO;
410 /* Used to parse -mstructure_size_boundary command line option. */
411 int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;
413 /* Used for Thumb call_via trampolines. */
414 rtx thumb_call_via_label[14];
415 static int thumb_call_reg_needed;
417 /* Bit values used to identify processor capabilities. */
418 #define FL_CO_PROC (1 << 0) /* Has external co-processor bus */
419 #define FL_ARCH3M (1 << 1) /* Extended multiply */
420 #define FL_MODE26 (1 << 2) /* 26-bit mode support */
421 #define FL_MODE32 (1 << 3) /* 32-bit mode support */
422 #define FL_ARCH4 (1 << 4) /* Architecture rel 4 */
423 #define FL_ARCH5 (1 << 5) /* Architecture rel 5 */
424 #define FL_THUMB (1 << 6) /* Thumb aware */
425 #define FL_LDSCHED (1 << 7) /* Load scheduling necessary */
426 #define FL_STRONG (1 << 8) /* StrongARM */
427 #define FL_ARCH5E (1 << 9) /* DSP extensions to v5 */
428 #define FL_XSCALE (1 << 10) /* XScale */
429 #define FL_CIRRUS (1 << 11) /* Cirrus/DSP. */
430 #define FL_ARCH6 (1 << 12) /* Architecture rel 6. Adds
431 media instructions. */
432 #define FL_VFPV2 (1 << 13) /* Vector Floating Point V2. */
433 #define FL_WBUF (1 << 14) /* Schedule for write buffer ops.
434 Note: ARM6 & 7 derivatives only. */
435 #define FL_ARCH6K (1 << 15) /* Architecture rel 6 K extensions. */
437 #define FL_IWMMXT (1 << 29) /* XScale v2 or "Intel Wireless MMX technology". */
439 #define FL_FOR_ARCH2 0
440 #define FL_FOR_ARCH3 FL_MODE32
441 #define FL_FOR_ARCH3M (FL_FOR_ARCH3 | FL_ARCH3M)
442 #define FL_FOR_ARCH4 (FL_FOR_ARCH3M | FL_ARCH4)
443 #define FL_FOR_ARCH4T (FL_FOR_ARCH4 | FL_THUMB)
444 #define FL_FOR_ARCH5 (FL_FOR_ARCH4 | FL_ARCH5)
445 #define FL_FOR_ARCH5T (FL_FOR_ARCH5 | FL_THUMB)
446 #define FL_FOR_ARCH5E (FL_FOR_ARCH5 | FL_ARCH5E)
447 #define FL_FOR_ARCH5TE (FL_FOR_ARCH5E | FL_THUMB)
448 #define FL_FOR_ARCH5TEJ FL_FOR_ARCH5TE
449 #define FL_FOR_ARCH6 (FL_FOR_ARCH5TE | FL_ARCH6)
450 #define FL_FOR_ARCH6J FL_FOR_ARCH6
451 #define FL_FOR_ARCH6K (FL_FOR_ARCH6 | FL_ARCH6K)
452 #define FL_FOR_ARCH6Z FL_FOR_ARCH6
453 #define FL_FOR_ARCH6ZK FL_FOR_ARCH6K
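/* For example, FL_FOR_ARCH5TE above expands to
   FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E | FL_THUMB;
   each successive architecture simply ORs its new capability bits into
   the mask of the architecture it extends, and each ARM_CORE entry below
   ORs its own flags into FL_FOR_ARCH##ARCH.  */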
455 /* The bits in this mask specify which
456 instructions we are allowed to generate. */
457 static unsigned long insn_flags = 0;
459 /* The bits in this mask specify which instruction scheduling options should be used. */
461 static unsigned long tune_flags = 0;
463 /* The following are used in the arm.md file as equivalents to bits
464 in the above two flag variables. */
466 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
469 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
472 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
475 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
478 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
481 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
484 /* Nonzero if this chip supports the ARM 6K extensions. */
487 /* Nonzero if this chip can benefit from load scheduling. */
488 int arm_ld_sched = 0;
490 /* Nonzero if this chip is a StrongARM. */
491 int arm_tune_strongarm = 0;
493 /* Nonzero if this chip is a Cirrus variant. */
494 int arm_arch_cirrus = 0;
496 /* Nonzero if this chip supports Intel Wireless MMX technology. */
497 int arm_arch_iwmmxt = 0;
499 /* Nonzero if this chip is an XScale. */
500 int arm_arch_xscale = 0;
502 /* Nonzero if tuning for XScale. */
503 int arm_tune_xscale = 0;
505 /* Nonzero if we want to tune for stores that access the write-buffer.
506 This typically means an ARM6 or ARM7 with MMU or MPU. */
507 int arm_tune_wbuf = 0;
509 /* Nonzero if generating Thumb instructions. */
512 /* Nonzero if we should define __THUMB_INTERWORK__ in the preprocessor.
514 XXX This is a bit of a hack, it's intended to help work around
515 problems in GLD which doesn't understand that armv5t code is
516 interworking clean. */
517 int arm_cpp_interwork = 0;
519 /* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
520 must report the mode of the memory reference from PRINT_OPERAND to
521 PRINT_OPERAND_ADDRESS. */
522 enum machine_mode output_memory_reference_mode;
524 /* The register number to be used for the PIC offset register. */
525 int arm_pic_register = INVALID_REGNUM;
527 /* Set to 1 when a return insn is output; this means that the epilogue is not needed. */
529 int return_used_this_function;
531 /* Set to 1 after arm_reorg has started. Reset to start at the start of
532 the next function. */
533 static int after_arm_reorg = 0;
535 /* The maximum number of insns to be used when loading a constant. */
536 static int arm_constant_limit = 3;
538 /* For an explanation of these variables, see final_prescan_insn below. */
540 enum arm_cond_code arm_current_cc;
542 int arm_target_label;
544 /* The condition codes of the ARM, and the inverse function. */
545 static const char * const arm_condition_codes[] =
547 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
548 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
551 #define streq(string1, string2) (strcmp (string1, string2) == 0)
553 /* Initialization code. */
557 const char *const name;
558 enum processor_type core;
560 const unsigned long flags;
561 bool (* rtx_costs) (rtx, int, int, int *);
564 /* Not all of these give usefully different compilation alternatives,
565 but there is no simple way of generalizing them. */
566 static const struct processors all_cores[] =
569 #define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
570 {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
571 #include "arm-cores.def"
573 {NULL, arm_none, NULL, 0, NULL}
576 static const struct processors all_architectures[] =
578 /* ARM Architectures */
579 /* We don't specify rtx_costs here as it will be figured out
582 {"armv2", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
583 {"armv2a", arm2, "2", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
584 {"armv3", arm6, "3", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
585 {"armv3m", arm7m, "3M", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
586 {"armv4", arm7tdmi, "4", FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
587 /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
588 implementations that support it, so we will leave it out for now. */
589 {"armv4t", arm7tdmi, "4T", FL_CO_PROC | FL_FOR_ARCH4T, NULL},
590 {"armv5", arm10tdmi, "5", FL_CO_PROC | FL_FOR_ARCH5, NULL},
591 {"armv5t", arm10tdmi, "5T", FL_CO_PROC | FL_FOR_ARCH5T, NULL},
592 {"armv5e", arm1026ejs, "5E", FL_CO_PROC | FL_FOR_ARCH5E, NULL},
593 {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
594 {"armv6", arm1136js, "6", FL_CO_PROC | FL_FOR_ARCH6, NULL},
595 {"armv6j", arm1136js, "6J", FL_CO_PROC | FL_FOR_ARCH6J, NULL},
596 {"armv6k", mpcore, "6K", FL_CO_PROC | FL_FOR_ARCH6K, NULL},
597 {"armv6z", arm1176jzs, "6Z", FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
598 {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
599 {"ep9312", ep9312, "4T", FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
600 {"iwmmxt", iwmmxt, "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT, NULL},
601 {NULL, arm_none, NULL, 0, NULL}
604 struct arm_cpu_select
608 const struct processors * processors;
611 /* This is a magic structure. The 'string' field is magically filled in
612 with a pointer to the value specified by the user on the command line
613 assuming that the user has specified such a value. */
615 static struct arm_cpu_select arm_select[] =
617 /* string name processors */
618 { NULL, "-mcpu=", all_cores },
619 { NULL, "-march=", all_architectures },
620 { NULL, "-mtune=", all_cores }
623 /* Defines representing the indexes into the above table. */
624 #define ARM_OPT_SET_CPU 0
625 #define ARM_OPT_SET_ARCH 1
626 #define ARM_OPT_SET_TUNE 2
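/* For example, "-march=armv5te -mtune=xscale" on the command line leaves
   arm_select[ARM_OPT_SET_CPU].string as NULL and sets the ARCH and TUNE
   entries to "armv5te" and "xscale" respectively; arm_override_options
   below then resolves each non-NULL entry against its processors table.  */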
628 /* The name of the preprocessor macro to define for this architecture. */
630 char arm_arch_name[] = "__ARM_ARCH_0UNK__";
639 /* Available values for -mfpu=. */
641 static const struct fpu_desc all_fpus[] =
643 {"fpa", FPUTYPE_FPA},
644 {"fpe2", FPUTYPE_FPA_EMU2},
645 {"fpe3", FPUTYPE_FPA_EMU3},
646 {"maverick", FPUTYPE_MAVERICK},
651 /* Floating point models used by the different hardware.
652 See fputype in arm.h. */
654 static const enum fputype fp_model_for_fpu[] =
656 /* No FP hardware. */
657 ARM_FP_MODEL_UNKNOWN, /* FPUTYPE_NONE */
658 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA */
659 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU2 */
660 ARM_FP_MODEL_FPA, /* FPUTYPE_FPA_EMU3 */
661 ARM_FP_MODEL_MAVERICK, /* FPUTYPE_MAVERICK */
662 ARM_FP_MODEL_VFP /* FPUTYPE_VFP */
669 enum float_abi_type abi_type;
673 /* Available values for -mfloat-abi=. */
675 static const struct float_abi all_float_abis[] =
677 {"soft", ARM_FLOAT_ABI_SOFT},
678 {"softfp", ARM_FLOAT_ABI_SOFTFP},
679 {"hard", ARM_FLOAT_ABI_HARD}
686 enum arm_abi_type abi_type;
690 /* Available values for -mabi=. */
692 static const struct abi_name arm_all_abis[] =
694 {"apcs-gnu", ARM_ABI_APCS},
695 {"atpcs", ARM_ABI_ATPCS},
696 {"aapcs", ARM_ABI_AAPCS},
697 {"iwmmxt", ARM_ABI_IWMMXT},
698 {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
701 /* Supported TLS relocations. */
711 /* Return the number of bits set in VALUE. */
713 bit_count (unsigned long value)
715 unsigned long count = 0;
720 value &= value - 1; /* Clear the least-significant set bit. */
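  /* Each pass of the loop above clears the lowest set bit, so the number
     of iterations equals the population count: for example value = 0x29
     goes 0x29 -> 0x28 -> 0x20 -> 0, giving count = 3.  */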
726 /* Set up library functions unique to ARM. */
729 arm_init_libfuncs (void)
731 /* There are no special library functions unless we are using the ARM BPABI. */
736 /* The functions below are described in Section 4 of the "Run-Time
737 ABI for the ARM architecture", Version 1.0. */
739 /* Double-precision floating-point arithmetic. Table 2. */
740 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
741 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
742 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
743 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
744 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
746 /* Double-precision comparisons. Table 3. */
747 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
748 set_optab_libfunc (ne_optab, DFmode, NULL);
749 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
750 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
751 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
752 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
753 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
755 /* Single-precision floating-point arithmetic. Table 4. */
756 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
757 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
758 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
759 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
760 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
762 /* Single-precision comparisons. Table 5. */
763 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
764 set_optab_libfunc (ne_optab, SFmode, NULL);
765 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
766 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
767 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
768 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
769 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
771 /* Floating-point to integer conversions. Table 6. */
772 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
773 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
774 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
775 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
776 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
777 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
778 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
779 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
781 /* Conversions between floating types. Table 7. */
782 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
783 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
785 /* Integer to floating-point conversions. Table 8. */
786 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
787 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
788 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
789 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
790 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
791 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
792 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
793 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
795 /* Long long. Table 9. */
796 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
797 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
798 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
799 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
800 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
801 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
802 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
803 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
805 /* Integer (32/32->32) division. \S 4.3.1. */
806 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
807 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
809 /* The divmod functions are designed so that they can be used for
810 plain division, even though they return both the quotient and the
811 remainder. The quotient is returned in the usual location (i.e.,
812 r0 for SImode, {r0, r1} for DImode), just as would be expected
813 for an ordinary division routine. Because the AAPCS calling
814 conventions specify that all of { r0, r1, r2, r3 } are
815 call-clobbered registers, there is no need to tell the compiler
816 explicitly that those registers are clobbered by these libcalls. */
818 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
819 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
821 /* For SImode division the ABI provides div-without-mod routines, which are faster. */
823 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
824 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
826 /* We don't have mod libcalls. Fortunately gcc knows how to use the
827 divmod libcalls instead. */
828 set_optab_libfunc (smod_optab, DImode, NULL);
829 set_optab_libfunc (umod_optab, DImode, NULL);
830 set_optab_libfunc (smod_optab, SImode, NULL);
831 set_optab_libfunc (umod_optab, SImode, NULL);
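  /* With the smod/umod optabs cleared above, a plain "a % b" in SImode is
     expanded through __aeabi_idivmod / __aeabi_uidivmod; per the run-time
     ABI those routines return the quotient in r0 and the remainder in r1,
     and the compiler simply uses the remainder half of the result.  */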
834 /* Implement TARGET_HANDLE_OPTION. */
837 arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
842 arm_select[1].string = arg;
846 arm_select[0].string = arg;
849 case OPT_mhard_float:
850 target_float_abi_name = "hard";
853 case OPT_msoft_float:
854 target_float_abi_name = "soft";
858 arm_select[2].string = arg;
866 /* Fix up any incompatible options that the user has specified.
867 This has now turned into a maze. */
869 arm_override_options (void)
872 enum processor_type target_arch_cpu
= arm_none
;
874 /* Set up the flags based on the cpu/architecture selected by the user. */
875 for (i
= ARRAY_SIZE (arm_select
); i
--;)
877 struct arm_cpu_select
* ptr
= arm_select
+ i
;
879 if (ptr
->string
!= NULL
&& ptr
->string
[0] != '\0')
881 const struct processors
* sel
;
883 for (sel
= ptr
->processors
; sel
->name
!= NULL
; sel
++)
884 if (streq (ptr
->string
, sel
->name
))
886 /* Set the architecture define. */
887 if (i
!= ARM_OPT_SET_TUNE
)
888 sprintf (arm_arch_name
, "__ARM_ARCH_%s__", sel
->arch
);
890 /* Determine the processor core for which we should
891 tune code-generation. */
892 if (/* -mcpu= is a sensible default. */
894 /* -mtune= overrides -mcpu= and -march=. */
895 || i
== ARM_OPT_SET_TUNE
)
896 arm_tune
= (enum processor_type
) (sel
- ptr
->processors
);
898 /* Remember the CPU associated with this architecture.
899 If no other option is used to set the CPU type,
900 we'll use this to guess the most suitable tuning
902 if (i
== ARM_OPT_SET_ARCH
)
903 target_arch_cpu
= sel
->core
;
905 if (i
!= ARM_OPT_SET_TUNE
)
907 /* If we have been given an architecture and a processor
908 make sure that they are compatible. We only generate
909 a warning though, and we prefer the CPU over the
911 if (insn_flags
!= 0 && (insn_flags
^ sel
->flags
))
912 warning (0, "switch -mcpu=%s conflicts with -march= switch",
915 insn_flags
= sel
->flags
;
921 if (sel
->name
== NULL
)
922 error ("bad value (%s) for %s switch", ptr
->string
, ptr
->name
);
926 /* Guess the tuning options from the architecture if necessary. */
927 if (arm_tune
== arm_none
)
928 arm_tune
= target_arch_cpu
;
930 /* If the user did not specify a processor, choose one for them. */
933 const struct processors
* sel
;
935 enum processor_type cpu
;
937 cpu
= TARGET_CPU_DEFAULT
;
940 #ifdef SUBTARGET_CPU_DEFAULT
941 /* Use the subtarget default CPU if none was specified by
943 cpu
= SUBTARGET_CPU_DEFAULT
;
945 /* Default to ARM6. */
949 sel
= &all_cores
[cpu
];
951 insn_flags
= sel
->flags
;
953 /* Now check to see if the user has specified some command line
954 switch that requires certain abilities from the cpu. */
957 if (TARGET_INTERWORK
|| TARGET_THUMB
)
959 sought
|= (FL_THUMB
| FL_MODE32
);
961 /* There are no ARM processors that support both APCS-26 and
962 interworking. Therefore we force FL_MODE26 to be removed
963 from insn_flags here (if it was set), so that the search
964 below will always be able to find a compatible processor. */
965 insn_flags
&= ~FL_MODE26
;
968 if (sought
!= 0 && ((sought
& insn_flags
) != sought
))
970 /* Try to locate a CPU type that supports all of the abilities
971 of the default CPU, plus the extra abilities requested by
973 for (sel
= all_cores
; sel
->name
!= NULL
; sel
++)
974 if ((sel
->flags
& sought
) == (sought
| insn_flags
))
977 if (sel
->name
== NULL
)
979 unsigned current_bit_count
= 0;
980 const struct processors
* best_fit
= NULL
;
982 /* Ideally we would like to issue an error message here
983 saying that it was not possible to find a CPU compatible
984 with the default CPU, but which also supports the command
985 line options specified by the programmer, and so they
986 ought to use the -mcpu=<name> command line option to
987 override the default CPU type.
989 If we cannot find a cpu that has both the
990 characteristics of the default cpu and the given
991 command line options we scan the array again looking
993 for (sel
= all_cores
; sel
->name
!= NULL
; sel
++)
994 if ((sel
->flags
& sought
) == sought
)
998 count
= bit_count (sel
->flags
& insn_flags
);
1000 if (count
>= current_bit_count
)
1003 current_bit_count
= count
;
1007 gcc_assert (best_fit
);
1011 insn_flags
= sel
->flags
;
1013 sprintf (arm_arch_name
, "__ARM_ARCH_%s__", sel
->arch
);
1014 if (arm_tune
== arm_none
)
1015 arm_tune
= (enum processor_type
) (sel
- all_cores
);
1018 /* The processor for which we should tune should now have been
1020 gcc_assert (arm_tune
!= arm_none
);
1022 tune_flags
= all_cores
[(int)arm_tune
].flags
;
1024 targetm
.rtx_costs
= arm_size_rtx_costs
;
1026 targetm
.rtx_costs
= all_cores
[(int)arm_tune
].rtx_costs
;
1028 /* Make sure that the processor choice does not conflict with any of the
1029 other command line choices. */
1030 if (TARGET_INTERWORK
&& !(insn_flags
& FL_THUMB
))
1032 warning (0, "target CPU does not support interworking" );
1033 target_flags
&= ~MASK_INTERWORK
;
1036 if (TARGET_THUMB
&& !(insn_flags
& FL_THUMB
))
1038 warning (0, "target CPU does not support THUMB instructions");
1039 target_flags
&= ~MASK_THUMB
;
1042 if (TARGET_APCS_FRAME
&& TARGET_THUMB
)
1044 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
1045 target_flags
&= ~MASK_APCS_FRAME
;
1048 /* Callee super interworking implies thumb interworking. Adding
1049 this to the flags here simplifies the logic elsewhere. */
1050 if (TARGET_THUMB
&& TARGET_CALLEE_INTERWORKING
)
1051 target_flags
|= MASK_INTERWORK
;
1053 /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
1054 from here where no function is being compiled currently. */
1055 if ((TARGET_TPCS_FRAME
|| TARGET_TPCS_LEAF_FRAME
) && TARGET_ARM
)
1056 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
1058 if (TARGET_ARM
&& TARGET_CALLEE_INTERWORKING
)
1059 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
1061 if (TARGET_ARM
&& TARGET_CALLER_INTERWORKING
)
1062 warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");
1064 if (TARGET_APCS_STACK
&& !TARGET_APCS_FRAME
)
1066 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
1067 target_flags
|= MASK_APCS_FRAME
;
1070 if (TARGET_POKE_FUNCTION_NAME
)
1071 target_flags
|= MASK_APCS_FRAME
;
1073 if (TARGET_APCS_REENT
&& flag_pic
)
1074 error ("-fpic and -mapcs-reent are incompatible");
1076 if (TARGET_APCS_REENT
)
1077 warning (0, "APCS reentrant code not supported. Ignored");
1079 /* If this target is normally configured to use APCS frames, warn if they
1080 are turned off and debugging is turned on. */
1082 && write_symbols
!= NO_DEBUG
1083 && !TARGET_APCS_FRAME
1084 && (TARGET_DEFAULT
& MASK_APCS_FRAME
))
1085 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
1087 /* If stack checking is disabled, we can use r10 as the PIC register,
1088 which keeps r9 available. */
1090 arm_pic_register
= TARGET_APCS_STACK
? 9 : 10;
1092 if (TARGET_APCS_FLOAT
)
1093 warning (0, "passing floating point arguments in fp regs not yet supported");
1095 /* Initialize boolean versions of the flags, for use in the arm.md file. */
1096 arm_arch3m
= (insn_flags
& FL_ARCH3M
) != 0;
1097 arm_arch4
= (insn_flags
& FL_ARCH4
) != 0;
1098 arm_arch4t
= arm_arch4
& ((insn_flags
& FL_THUMB
) != 0);
1099 arm_arch5
= (insn_flags
& FL_ARCH5
) != 0;
1100 arm_arch5e
= (insn_flags
& FL_ARCH5E
) != 0;
1101 arm_arch6
= (insn_flags
& FL_ARCH6
) != 0;
1102 arm_arch6k
= (insn_flags
& FL_ARCH6K
) != 0;
1103 arm_arch_xscale
= (insn_flags
& FL_XSCALE
) != 0;
1104 arm_arch_cirrus
= (insn_flags
& FL_CIRRUS
) != 0;
1106 arm_ld_sched
= (tune_flags
& FL_LDSCHED
) != 0;
1107 arm_tune_strongarm
= (tune_flags
& FL_STRONG
) != 0;
1108 thumb_code
= (TARGET_ARM
== 0);
1109 arm_tune_wbuf
= (tune_flags
& FL_WBUF
) != 0;
1110 arm_tune_xscale
= (tune_flags
& FL_XSCALE
) != 0;
1111 arm_arch_iwmmxt
= (insn_flags
& FL_IWMMXT
) != 0;
1113 /* V5 code we generate is completely interworking capable, so we turn off
1114 TARGET_INTERWORK here to avoid many tests later on. */
1116 /* XXX However, we must pass the right pre-processor defines to CPP
1117 or GLD can get confused. This is a hack. */
1118 if (TARGET_INTERWORK
)
1119 arm_cpp_interwork
= 1;
1122 target_flags
&= ~MASK_INTERWORK
;
1124 if (target_abi_name
)
1126 for (i
= 0; i
< ARRAY_SIZE (arm_all_abis
); i
++)
1128 if (streq (arm_all_abis
[i
].name
, target_abi_name
))
1130 arm_abi
= arm_all_abis
[i
].abi_type
;
1134 if (i
== ARRAY_SIZE (arm_all_abis
))
1135 error ("invalid ABI option: -mabi=%s", target_abi_name
);
1138 arm_abi
= ARM_DEFAULT_ABI
;
1140 if (TARGET_IWMMXT
&& !ARM_DOUBLEWORD_ALIGN
)
1141 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
1143 if (TARGET_IWMMXT_ABI
&& !TARGET_IWMMXT
)
1144 error ("iwmmxt abi requires an iwmmxt capable cpu");
1146 arm_fp_model
= ARM_FP_MODEL_UNKNOWN
;
1147 if (target_fpu_name
== NULL
&& target_fpe_name
!= NULL
)
1149 if (streq (target_fpe_name
, "2"))
1150 target_fpu_name
= "fpe2";
1151 else if (streq (target_fpe_name
, "3"))
1152 target_fpu_name
= "fpe3";
1154 error ("invalid floating point emulation option: -mfpe=%s",
1157 if (target_fpu_name
!= NULL
)
1159 /* The user specified a FPU. */
1160 for (i
= 0; i
< ARRAY_SIZE (all_fpus
); i
++)
1162 if (streq (all_fpus
[i
].name
, target_fpu_name
))
1164 arm_fpu_arch
= all_fpus
[i
].fpu
;
1165 arm_fpu_tune
= arm_fpu_arch
;
1166 arm_fp_model
= fp_model_for_fpu
[arm_fpu_arch
];
1170 if (arm_fp_model
== ARM_FP_MODEL_UNKNOWN
)
1171 error ("invalid floating point option: -mfpu=%s", target_fpu_name
);
1175 #ifdef FPUTYPE_DEFAULT
1176 /* Use the default if it is specified for this platform. */
1177 arm_fpu_arch
= FPUTYPE_DEFAULT
;
1178 arm_fpu_tune
= FPUTYPE_DEFAULT
;
1180 /* Pick one based on CPU type. */
1181 /* ??? Some targets assume FPA is the default.
1182 if ((insn_flags & FL_VFP) != 0)
1183 arm_fpu_arch = FPUTYPE_VFP;
1186 if (arm_arch_cirrus
)
1187 arm_fpu_arch
= FPUTYPE_MAVERICK
;
1189 arm_fpu_arch
= FPUTYPE_FPA_EMU2
;
1191 if (tune_flags
& FL_CO_PROC
&& arm_fpu_arch
== FPUTYPE_FPA_EMU2
)
1192 arm_fpu_tune
= FPUTYPE_FPA
;
1194 arm_fpu_tune
= arm_fpu_arch
;
1195 arm_fp_model
= fp_model_for_fpu
[arm_fpu_arch
];
1196 gcc_assert (arm_fp_model
!= ARM_FP_MODEL_UNKNOWN
);
1199 if (target_float_abi_name
!= NULL
)
1201 /* The user specified a FP ABI. */
1202 for (i
= 0; i
< ARRAY_SIZE (all_float_abis
); i
++)
1204 if (streq (all_float_abis
[i
].name
, target_float_abi_name
))
1206 arm_float_abi
= all_float_abis
[i
].abi_type
;
1210 if (i
== ARRAY_SIZE (all_float_abis
))
1211 error ("invalid floating point abi: -mfloat-abi=%s",
1212 target_float_abi_name
);
1215 arm_float_abi
= TARGET_DEFAULT_FLOAT_ABI
;
1217 if (arm_float_abi
== ARM_FLOAT_ABI_HARD
&& TARGET_VFP
)
1218 sorry ("-mfloat-abi=hard and VFP");
1220 /* FPA and iWMMXt are incompatible because the insn encodings overlap.
1221 VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
1222 will ever exist. GCC makes no attempt to support this combination. */
1223 if (TARGET_IWMMXT
&& !TARGET_SOFT_FLOAT
)
1224 sorry ("iWMMXt and hardware floating point");
1226 /* If soft-float is specified then don't use FPU. */
1227 if (TARGET_SOFT_FLOAT
)
1228 arm_fpu_arch
= FPUTYPE_NONE
;
1230 /* For arm2/3 there is no need to do any scheduling if there is only
1231 a floating point emulator, or we are doing software floating-point. */
1232 if ((TARGET_SOFT_FLOAT
1233 || arm_fpu_tune
== FPUTYPE_FPA_EMU2
1234 || arm_fpu_tune
== FPUTYPE_FPA_EMU3
)
1235 && (tune_flags
& FL_MODE32
) == 0)
1236 flag_schedule_insns
= flag_schedule_insns_after_reload
= 0;
1238 if (target_thread_switch
)
1240 if (strcmp (target_thread_switch
, "soft") == 0)
1241 target_thread_pointer
= TP_SOFT
;
1242 else if (strcmp (target_thread_switch
, "auto") == 0)
1243 target_thread_pointer
= TP_AUTO
;
1244 else if (strcmp (target_thread_switch
, "cp15") == 0)
1245 target_thread_pointer
= TP_CP15
;
1247 error ("invalid thread pointer option: -mtp=%s", target_thread_switch
);
1250 /* Use the cp15 method if it is available. */
1251 if (target_thread_pointer
== TP_AUTO
)
1253 if (arm_arch6k
&& !TARGET_THUMB
)
1254 target_thread_pointer
= TP_CP15
;
1256 target_thread_pointer
= TP_SOFT
;
1259 if (TARGET_HARD_TP
&& TARGET_THUMB
)
1260 error ("can not use -mtp=cp15 with -mthumb");
1262 /* Override the default structure alignment for AAPCS ABI. */
1263 if (TARGET_AAPCS_BASED
)
1264 arm_structure_size_boundary
= 8;
1266 if (structure_size_string
!= NULL
)
1268 int size
= strtol (structure_size_string
, NULL
, 0);
1270 if (size
== 8 || size
== 32
1271 || (ARM_DOUBLEWORD_ALIGN
&& size
== 64))
1272 arm_structure_size_boundary
= size
;
1274 warning (0, "structure size boundary can only be set to %s",
1275 ARM_DOUBLEWORD_ALIGN
? "8, 32 or 64": "8 or 32");
1278 if (arm_pic_register_string
!= NULL
)
1280 int pic_register
= decode_reg_name (arm_pic_register_string
);
1283 warning (0, "-mpic-register= is useless without -fpic");
1285 /* Prevent the user from choosing an obviously stupid PIC register. */
1286 else if (pic_register
< 0 || call_used_regs
[pic_register
]
1287 || pic_register
== HARD_FRAME_POINTER_REGNUM
1288 || pic_register
== STACK_POINTER_REGNUM
1289 || pic_register
>= PC_REGNUM
)
1290 error ("unable to use '%s' for PIC register", arm_pic_register_string
);
1292 arm_pic_register
= pic_register
;
1295 if (TARGET_THUMB
&& flag_schedule_insns
)
1297 /* Don't warn since it's on by default in -O2. */
1298 flag_schedule_insns
= 0;
1303 arm_constant_limit
= 1;
1305 /* If optimizing for size, bump the number of instructions that we
1306 are prepared to conditionally execute (even on a StrongARM). */
1307 max_insns_skipped
= 6;
1311 /* For processors with load scheduling, it never costs more than
1312 2 cycles to load a constant, and the load scheduler may well
1313 reduce that to 1. */
1315 arm_constant_limit
= 1;
1317 /* On XScale the longer latency of a load makes it more difficult
1318 to achieve a good schedule, so it's faster to synthesize
1319 constants that can be done in two insns. */
1320 if (arm_tune_xscale
)
1321 arm_constant_limit
= 2;
1323 /* StrongARM has early execution of branches, so a sequence
1324 that is worth skipping is shorter. */
1325 if (arm_tune_strongarm
)
1326 max_insns_skipped
= 3;
1329 /* Register global variables with the garbage collector. */
1330 arm_add_gc_roots ();
1334 arm_add_gc_roots (void)
1336 gcc_obstack_init(&minipool_obstack
);
1337 minipool_startobj
= (char *) obstack_alloc (&minipool_obstack
, 0);
1340 /* A table of known ARM exception types.
1341 For use with the interrupt function attribute. */
1345 const char *const arg;
1346 const unsigned long return_value;
1350 static const isr_attribute_arg isr_attribute_args[] =
1352 { "IRQ", ARM_FT_ISR },
1353 { "irq", ARM_FT_ISR },
1354 { "FIQ", ARM_FT_FIQ },
1355 { "fiq", ARM_FT_FIQ },
1356 { "ABORT", ARM_FT_ISR },
1357 { "abort", ARM_FT_ISR },
1358 { "ABORT", ARM_FT_ISR },
1359 { "abort", ARM_FT_ISR },
1360 { "UNDEF", ARM_FT_EXCEPTION },
1361 { "undef", ARM_FT_EXCEPTION },
1362 { "SWI", ARM_FT_EXCEPTION },
1363 { "swi", ARM_FT_EXCEPTION },
1364 { NULL, ARM_FT_NORMAL }
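/* In user code these names appear as attribute arguments, for example:
     void handler (void) __attribute__ ((interrupt ("IRQ")));
   A string not listed above falls through to ARM_FT_UNKNOWN in
   arm_isr_value below.  */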
1367 /* Returns the (interrupt) function type of the current
1368 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
1370 static unsigned long
1371 arm_isr_value (tree argument
)
1373 const isr_attribute_arg
* ptr
;
1376 /* No argument - default to IRQ. */
1377 if (argument
== NULL_TREE
)
1380 /* Get the value of the argument. */
1381 if (TREE_VALUE (argument
) == NULL_TREE
1382 || TREE_CODE (TREE_VALUE (argument
)) != STRING_CST
)
1383 return ARM_FT_UNKNOWN
;
1385 arg
= TREE_STRING_POINTER (TREE_VALUE (argument
));
1387 /* Check it against the list of known arguments. */
1388 for (ptr
= isr_attribute_args
; ptr
->arg
!= NULL
; ptr
++)
1389 if (streq (arg
, ptr
->arg
))
1390 return ptr
->return_value
;
1392 /* An unrecognized interrupt type. */
1393 return ARM_FT_UNKNOWN
;
1396 /* Computes the type of the current function. */
1398 static unsigned long
1399 arm_compute_func_type (void)
1401 unsigned long type
= ARM_FT_UNKNOWN
;
1405 gcc_assert (TREE_CODE (current_function_decl
) == FUNCTION_DECL
);
1407 /* Decide if the current function is volatile. Such functions
1408 never return, and many memory cycles can be saved by not storing
1409 register values that will never be needed again. This optimization
1410 was added to speed up context switching in a kernel application. */
1412 && (TREE_NOTHROW (current_function_decl
)
1413 || !(flag_unwind_tables
1414 || (flag_exceptions
&& !USING_SJLJ_EXCEPTIONS
)))
1415 && TREE_THIS_VOLATILE (current_function_decl
))
1416 type
|= ARM_FT_VOLATILE
;
1418 if (cfun
->static_chain_decl
!= NULL
)
1419 type
|= ARM_FT_NESTED
;
1421 attr
= DECL_ATTRIBUTES (current_function_decl
);
1423 a
= lookup_attribute ("naked", attr
);
1425 type
|= ARM_FT_NAKED
;
1427 a
= lookup_attribute ("isr", attr
);
1429 a
= lookup_attribute ("interrupt", attr
);
1432 type
|= TARGET_INTERWORK
? ARM_FT_INTERWORKED
: ARM_FT_NORMAL
;
1434 type
|= arm_isr_value (TREE_VALUE (a
));
1439 /* Returns the type of the current function. */
1442 arm_current_func_type (void)
1444 if (ARM_FUNC_TYPE (cfun
->machine
->func_type
) == ARM_FT_UNKNOWN
)
1445 cfun
->machine
->func_type
= arm_compute_func_type ();
1447 return cfun
->machine
->func_type
;
1450 /* Return 1 if it is possible to return using a single instruction.
1451 If SIBLING is non-null, this is a test for a return before a sibling
1452 call. SIBLING is the call insn, so we can examine its register usage. */
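/* For instance, a function whose epilogue would otherwise need
   "ldmfd sp!, {r4, r5, lr}" followed by a branch back to the caller can
   instead finish with the single instruction "ldmfd sp!, {r4, r5, pc}",
   provided none of the conditions checked below rules it out.  */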
1455 use_return_insn (int iscond
, rtx sibling
)
1458 unsigned int func_type
;
1459 unsigned long saved_int_regs
;
1460 unsigned HOST_WIDE_INT stack_adjust
;
1461 arm_stack_offsets
*offsets
;
1463 /* Never use a return instruction before reload has run. */
1464 if (!reload_completed
)
1467 func_type
= arm_current_func_type ();
1469 /* Naked functions and volatile functions need special treatment. */
1471 if (func_type
& (ARM_FT_VOLATILE
| ARM_FT_NAKED
))
1474 /* So do interrupt functions that use the frame pointer. */
1475 if (IS_INTERRUPT (func_type
) && frame_pointer_needed
)
1478 offsets
= arm_get_frame_offsets ();
1479 stack_adjust
= offsets
->outgoing_args
- offsets
->saved_regs
;
1481 /* As do variadic functions. */
1482 if (current_function_pretend_args_size
1483 || cfun
->machine
->uses_anonymous_args
1484 /* Or if the function calls __builtin_eh_return () */
1485 || current_function_calls_eh_return
1486 /* Or if the function calls alloca */
1487 || current_function_calls_alloca
1488 /* Or if there is a stack adjustment. However, if the stack pointer
1489 is saved on the stack, we can use a pre-incrementing stack load. */
1490 || !(stack_adjust
== 0 || (frame_pointer_needed
&& stack_adjust
== 4)))
1493 saved_int_regs
= arm_compute_save_reg_mask ();
1495 /* Unfortunately, the insn
1497 ldmib sp, {..., sp, ...}
1499 triggers a bug on most SA-110 based devices, such that the stack
1500 pointer won't be correctly restored if the instruction takes a
1501 page fault. We work around this problem by popping r3 along with
1502 the other registers, since that is never slower than executing
1503 another instruction.
1505 We test for !arm_arch5 here, because code for any architecture
1506 less than this could potentially be run on one of the buggy chips. */
1508 if (stack_adjust
== 4 && !arm_arch5
)
1510 /* Validate that r3 is a call-clobbered register (always true in
1511 the default abi) ... */
1512 if (!call_used_regs
[3])
1515 /* ... that it isn't being used for a return value ... */
1516 if (arm_size_return_regs () >= (4 * UNITS_PER_WORD
))
1519 /* ... or for a tail-call argument ... */
1522 gcc_assert (GET_CODE (sibling
) == CALL_INSN
);
1524 if (find_regno_fusage (sibling
, USE
, 3))
1528 /* ... and that there are no call-saved registers in r0-r2
1529 (always true in the default ABI). */
1530 if (saved_int_regs
& 0x7)
1534 /* Can't be done if interworking with Thumb, and any registers have been stacked. */
1536 if (TARGET_INTERWORK
&& saved_int_regs
!= 0)
1539 /* On StrongARM, conditional returns are expensive if they aren't
1540 taken and multiple registers have been stacked. */
1541 if (iscond
&& arm_tune_strongarm
)
1543 /* Conditional return when just the LR is stored is a simple
1544 conditional-load instruction, that's not expensive. */
1545 if (saved_int_regs
!= 0 && saved_int_regs
!= (1 << LR_REGNUM
))
1548 if (flag_pic
&& regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
])
1552 /* If there are saved registers but the LR isn't saved, then we need
1553 two instructions for the return. */
1554 if (saved_int_regs
&& !(saved_int_regs
& (1 << LR_REGNUM
)))
1557 /* Can't be done if any of the FPA regs are pushed,
1558 since this also requires an insn. */
1559 if (TARGET_HARD_FLOAT
&& TARGET_FPA
)
1560 for (regno
= FIRST_FPA_REGNUM
; regno
<= LAST_FPA_REGNUM
; regno
++)
1561 if (regs_ever_live
[regno
] && !call_used_regs
[regno
])
1564 /* Likewise VFP regs. */
1565 if (TARGET_HARD_FLOAT
&& TARGET_VFP
)
1566 for (regno
= FIRST_VFP_REGNUM
; regno
<= LAST_VFP_REGNUM
; regno
++)
1567 if (regs_ever_live
[regno
] && !call_used_regs
[regno
])
1570 if (TARGET_REALLY_IWMMXT
)
1571 for (regno
= FIRST_IWMMXT_REGNUM
; regno
<= LAST_IWMMXT_REGNUM
; regno
++)
1572 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
1578 /* Return TRUE if int I is a valid immediate ARM constant. */
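/* (An ARM data-processing immediate is an 8-bit value rotated right by an
   even amount, so 0xff, 0xff00, 0xff000000 and 0xf000000f are all
   representable, while 0x101 or 0x1fe00000 are not; the checks below
   follow that rule.)  */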
1581 const_ok_for_arm (HOST_WIDE_INT i
)
1585 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
1586 be all zero, or all one. */
1587 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff) != 0
1588 && ((i
& ~(unsigned HOST_WIDE_INT
) 0xffffffff)
1589 != ((~(unsigned HOST_WIDE_INT
) 0)
1590 & ~(unsigned HOST_WIDE_INT
) 0xffffffff)))
1593 i
&= (unsigned HOST_WIDE_INT
) 0xffffffff;
1595 /* Fast return for 0 and small values. We must do this for zero, since
1596 the code below can't handle that one case. */
1597 if ((i
& ~(unsigned HOST_WIDE_INT
) 0xff) == 0)
1600 /* Get the number of trailing zeros, rounded down to the nearest even number. */
1602 lowbit
= (ffs ((int) i
) - 1) & ~1;
1604 if ((i
& ~(((unsigned HOST_WIDE_INT
) 0xff) << lowbit
)) == 0)
1606 else if (lowbit
<= 4
1607 && ((i
& ~0xc000003f) == 0
1608 || (i
& ~0xf000000f) == 0
1609 || (i
& ~0xfc000003) == 0))
1615 /* Return true if I is a valid constant for the operation CODE. */
1617 const_ok_for_op (HOST_WIDE_INT i
, enum rtx_code code
)
1619 if (const_ok_for_arm (i
))
1625 return const_ok_for_arm (ARM_SIGN_EXTEND (-i
));
1627 case MINUS
: /* Should only occur with (MINUS I reg) => rsb */
1633 return const_ok_for_arm (ARM_SIGN_EXTEND (~i
));
1640 /* Emit a sequence of insns to handle a large constant.
1641 CODE is the code of the operation required, it can be any of SET, PLUS,
1642 IOR, AND, XOR, MINUS;
1643 MODE is the mode in which the operation is being performed;
1644 VAL is the integer to operate on;
1645 SOURCE is the other operand (a register, or a null-pointer for SET);
1646 SUBTARGETS means it is safe to create scratch registers if that will
1647 either produce a simpler sequence, or we will want to cse the values.
1648 Return value is the number of insns emitted. */
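/* For example, a SET of the constant 0x00ff00ff (not itself a valid
   immediate) is typically synthesized as two instructions, roughly
   "mov rD, #0x00ff0000" followed by "orr rD, rD, #0x000000ff".  */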
1651 arm_split_constant (enum rtx_code code
, enum machine_mode mode
, rtx insn
,
1652 HOST_WIDE_INT val
, rtx target
, rtx source
, int subtargets
)
1656 if (insn
&& GET_CODE (PATTERN (insn
)) == COND_EXEC
)
1657 cond
= COND_EXEC_TEST (PATTERN (insn
));
1661 if (subtargets
|| code
== SET
1662 || (GET_CODE (target
) == REG
&& GET_CODE (source
) == REG
1663 && REGNO (target
) != REGNO (source
)))
1665 /* After arm_reorg has been called, we can't fix up expensive
1666 constants by pushing them into memory so we must synthesize
1667 them in-line, regardless of the cost. This is only likely to
1668 be more costly on chips that have load delay slots and we are
1669 compiling without running the scheduler (so no splitting
1670 occurred before the final instruction emission).
1672 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
1674 if (!after_arm_reorg
1676 && (arm_gen_constant (code
, mode
, NULL_RTX
, val
, target
, source
,
1678 > arm_constant_limit
+ (code
!= SET
)))
1682 /* Currently SET is the only monadic value for CODE, all
1683 the rest are diadic. */
1684 emit_insn (gen_rtx_SET (VOIDmode
, target
, GEN_INT (val
)));
1689 rtx temp
= subtargets
? gen_reg_rtx (mode
) : target
;
1691 emit_insn (gen_rtx_SET (VOIDmode
, temp
, GEN_INT (val
)));
1692 /* For MINUS, the value is subtracted from, since we never
1693 have subtraction of a constant. */
1695 emit_insn (gen_rtx_SET (VOIDmode
, target
,
1696 gen_rtx_MINUS (mode
, temp
, source
)));
1698 emit_insn (gen_rtx_SET (VOIDmode
, target
,
1699 gen_rtx_fmt_ee (code
, mode
, source
, temp
)));
1705 return arm_gen_constant (code
, mode
, cond
, val
, target
, source
, subtargets
,
1710 count_insns_for_constant (HOST_WIDE_INT remainder
, int i
)
1712 HOST_WIDE_INT temp1
;
1720 if (remainder
& (3 << (i
- 2)))
1725 temp1
= remainder
& ((0x0ff << end
)
1726 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
1727 remainder
&= ~temp1
;
1732 } while (remainder
);
1736 /* Emit an instruction with the indicated PATTERN. If COND is
1737 non-NULL, conditionalize the execution of the instruction on COND
1741 emit_constant_insn (rtx cond
, rtx pattern
)
1744 pattern
= gen_rtx_COND_EXEC (VOIDmode
, copy_rtx (cond
), pattern
);
1745 emit_insn (pattern
);
1748 /* As above, but extra parameter GENERATE which, if clear, suppresses
1752 arm_gen_constant (enum rtx_code code
, enum machine_mode mode
, rtx cond
,
1753 HOST_WIDE_INT val
, rtx target
, rtx source
, int subtargets
,
1758 int can_negate_initial
= 0;
1761 int num_bits_set
= 0;
1762 int set_sign_bit_copies
= 0;
1763 int clear_sign_bit_copies
= 0;
1764 int clear_zero_bit_copies
= 0;
1765 int set_zero_bit_copies
= 0;
1767 unsigned HOST_WIDE_INT temp1
, temp2
;
1768 unsigned HOST_WIDE_INT remainder
= val
& 0xffffffff;
1770 /* Find out which operations are safe for a given CODE. Also do a quick
1771 check for degenerate cases; these can occur when DImode operations are split. */
1783 can_negate_initial
= 1;
1787 if (remainder
== 0xffffffff)
1790 emit_constant_insn (cond
,
1791 gen_rtx_SET (VOIDmode
, target
,
1792 GEN_INT (ARM_SIGN_EXTEND (val
))));
1797 if (reload_completed
&& rtx_equal_p (target
, source
))
1800 emit_constant_insn (cond
,
1801 gen_rtx_SET (VOIDmode
, target
, source
));
1810 emit_constant_insn (cond
,
1811 gen_rtx_SET (VOIDmode
, target
, const0_rtx
));
1814 if (remainder
== 0xffffffff)
1816 if (reload_completed
&& rtx_equal_p (target
, source
))
1819 emit_constant_insn (cond
,
1820 gen_rtx_SET (VOIDmode
, target
, source
));
1829 if (reload_completed
&& rtx_equal_p (target
, source
))
1832 emit_constant_insn (cond
,
1833 gen_rtx_SET (VOIDmode
, target
, source
));
1837 /* We don't know how to handle other cases yet. */
1838 gcc_assert (remainder
== 0xffffffff);
1841 emit_constant_insn (cond
,
1842 gen_rtx_SET (VOIDmode
, target
,
1843 gen_rtx_NOT (mode
, source
)));
1847 /* We treat MINUS as (val - source), since (source - val) is always
1848 passed as (source + (-val)). */
1852 emit_constant_insn (cond
,
1853 gen_rtx_SET (VOIDmode
, target
,
1854 gen_rtx_NEG (mode
, source
)));
1857 if (const_ok_for_arm (val
))
1860 emit_constant_insn (cond
,
1861 gen_rtx_SET (VOIDmode
, target
,
1862 gen_rtx_MINUS (mode
, GEN_INT (val
),
1874 /* If we can do it in one insn get out quickly. */
1875 if (const_ok_for_arm (val
)
1876 || (can_negate_initial
&& const_ok_for_arm (-val
))
1877 || (can_invert
&& const_ok_for_arm (~val
)))
1880 emit_constant_insn (cond
,
1881 gen_rtx_SET (VOIDmode
, target
,
1883 ? gen_rtx_fmt_ee (code
, mode
, source
,
1889 /* Calculate a few attributes that may be useful for specific
1891 for (i
= 31; i
>= 0; i
--)
1893 if ((remainder
& (1 << i
)) == 0)
1894 clear_sign_bit_copies
++;
1899 for (i
= 31; i
>= 0; i
--)
1901 if ((remainder
& (1 << i
)) != 0)
1902 set_sign_bit_copies
++;
1907 for (i
= 0; i
<= 31; i
++)
1909 if ((remainder
& (1 << i
)) == 0)
1910 clear_zero_bit_copies
++;
1915 for (i
= 0; i
<= 31; i
++)
1917 if ((remainder
& (1 << i
)) != 0)
1918 set_zero_bit_copies
++;
  /* See if we can do this by sign_extending a constant that is known
     to be negative.  This is a good way of doing it, since the shift
     may well merge into a subsequent insn.  */
1929 if (set_sign_bit_copies
> 1)
1931 if (const_ok_for_arm
1932 (temp1
= ARM_SIGN_EXTEND (remainder
1933 << (set_sign_bit_copies
- 1))))
1937 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1938 emit_constant_insn (cond
,
1939 gen_rtx_SET (VOIDmode
, new_src
,
1941 emit_constant_insn (cond
,
1942 gen_ashrsi3 (target
, new_src
,
1943 GEN_INT (set_sign_bit_copies
- 1)));
      /* For an inverted constant, we will need to set the low bits;
	 these will be shifted out of harm's way.  */
1949 temp1
|= (1 << (set_sign_bit_copies
- 1)) - 1;
1950 if (const_ok_for_arm (~temp1
))
1954 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1955 emit_constant_insn (cond
,
1956 gen_rtx_SET (VOIDmode
, new_src
,
1958 emit_constant_insn (cond
,
1959 gen_ashrsi3 (target
, new_src
,
1960 GEN_INT (set_sign_bit_copies
- 1)));
  /* See if we can calculate the value as the difference between two
     valid immediates.  */
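  /* Worked example (added commentary, not from the original sources):
     for remainder == 0x00fffff0 the code below computes temp1 == 0x01000000
     and temp2 == 0x10, both valid ARM immediates, so the value can be
     built roughly as

	 mov	rN, #0x01000000
	 sub	rD, rN, #0x10		@ rD = 0x00fffff0

     i.e. two instructions, where building the constant from its set
     bits directly would need three.  */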
  if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
    {
      int topshift = clear_sign_bit_copies & ~1;

      temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
			       & (0xff000000 >> topshift));

      /* If temp1 is zero, then that means the 9 most significant
	 bits of remainder were 1 and we've caused it to overflow.
	 When topshift is 0 we don't need to do anything since we
	 can borrow from 'bit 32'.  */
      if (temp1 == 0 && topshift != 0)
	temp1 = 0x80000000 >> (topshift - 1);

      temp2 = ARM_SIGN_EXTEND (temp1 - remainder);

      if (const_ok_for_arm (temp2))
1988 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
1989 emit_constant_insn (cond
,
1990 gen_rtx_SET (VOIDmode
, new_src
,
1992 emit_constant_insn (cond
,
1993 gen_addsi3 (target
, new_src
,
  /* See if we can generate this by setting the bottom (or the top)
     16 bits, and then shifting these into the other half of the
     word.  We only look for the simplest cases, to do more would cost
     too much.  Be careful, however, not to generate this when the
     alternative would take fewer insns.  */
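  /* Worked example (added commentary, not from the original sources):
     for remainder == 0x01010101 the low half 0x0101 is not a valid
     immediate, but it repeats in the high half, so the constant can be
     synthesized roughly as

	 mov	rN, #1
	 orr	rN, rN, #0x100		@ rN = 0x0101
	 orr	rD, rN, rN, lsl #16	@ rD = 0x01010101

     three instructions, against four when each byte is OR-ed in
     separately.  */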
2006 if (val
& 0xffff0000)
2008 temp1
= remainder
& 0xffff0000;
2009 temp2
= remainder
& 0x0000ffff;
2011 /* Overlaps outside this range are best done using other methods. */
2012 for (i
= 9; i
< 24; i
++)
2014 if ((((temp2
| (temp2
<< i
)) & 0xffffffff) == remainder
)
2015 && !const_ok_for_arm (temp2
))
2017 rtx new_src
= (subtargets
2018 ? (generate
? gen_reg_rtx (mode
) : NULL_RTX
)
2020 insns
= arm_gen_constant (code
, mode
, cond
, temp2
, new_src
,
2021 source
, subtargets
, generate
);
2029 gen_rtx_ASHIFT (mode
, source
,
2036 /* Don't duplicate cases already considered. */
2037 for (i
= 17; i
< 24; i
++)
2039 if (((temp1
| (temp1
>> i
)) == remainder
)
2040 && !const_ok_for_arm (temp1
))
2042 rtx new_src
= (subtargets
2043 ? (generate
? gen_reg_rtx (mode
) : NULL_RTX
)
2045 insns
= arm_gen_constant (code
, mode
, cond
, temp1
, new_src
,
2046 source
, subtargets
, generate
);
2051 gen_rtx_SET (VOIDmode
, target
,
2054 gen_rtx_LSHIFTRT (mode
, source
,
  /* If we have IOR or XOR, and the constant can be loaded in a
     single instruction, and we can find a temporary to put it in,
     then this can be done in two instructions instead of 3-4.  */
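  /* Illustrative sketch (added commentary, not from the original
     sources): for code == IOR and val == 0xfffffff0, ~val == 0xf is a
     valid immediate, so something like

	 mvn	rT, #0xf		@ rT = 0xfffffff0
	 orr	rD, rS, rT

     suffices, whereas OR-ing the four bytes of val into rS directly
     would take four instructions.  */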
2069 /* TARGET can't be NULL if SUBTARGETS is 0 */
2070 || (reload_completed
&& !reg_mentioned_p (target
, source
)))
2072 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val
)))
2076 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2078 emit_constant_insn (cond
,
2079 gen_rtx_SET (VOIDmode
, sub
,
2081 emit_constant_insn (cond
,
2082 gen_rtx_SET (VOIDmode
, target
,
2083 gen_rtx_fmt_ee (code
, mode
,
2093 if (set_sign_bit_copies
> 8
2094 && (val
& (-1 << (32 - set_sign_bit_copies
))) == val
)
2098 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2099 rtx shift
= GEN_INT (set_sign_bit_copies
);
2103 gen_rtx_SET (VOIDmode
, sub
,
2105 gen_rtx_ASHIFT (mode
,
2110 gen_rtx_SET (VOIDmode
, target
,
2112 gen_rtx_LSHIFTRT (mode
, sub
,
2118 if (set_zero_bit_copies
> 8
2119 && (remainder
& ((1 << set_zero_bit_copies
) - 1)) == remainder
)
2123 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2124 rtx shift
= GEN_INT (set_zero_bit_copies
);
2128 gen_rtx_SET (VOIDmode
, sub
,
2130 gen_rtx_LSHIFTRT (mode
,
2135 gen_rtx_SET (VOIDmode
, target
,
2137 gen_rtx_ASHIFT (mode
, sub
,
2143 if (const_ok_for_arm (temp1
= ARM_SIGN_EXTEND (~val
)))
2147 rtx sub
= subtargets
? gen_reg_rtx (mode
) : target
;
2148 emit_constant_insn (cond
,
2149 gen_rtx_SET (VOIDmode
, sub
,
2150 gen_rtx_NOT (mode
, source
)));
2153 sub
= gen_reg_rtx (mode
);
2154 emit_constant_insn (cond
,
2155 gen_rtx_SET (VOIDmode
, sub
,
2156 gen_rtx_AND (mode
, source
,
2158 emit_constant_insn (cond
,
2159 gen_rtx_SET (VOIDmode
, target
,
2160 gen_rtx_NOT (mode
, sub
)));
      /* See if two shifts will do two or more insns' worth of work.  */
2168 if (clear_sign_bit_copies
>= 16 && clear_sign_bit_copies
< 24)
2170 HOST_WIDE_INT shift_mask
= ((0xffffffff
2171 << (32 - clear_sign_bit_copies
))
2174 if ((remainder
| shift_mask
) != 0xffffffff)
2178 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2179 insns
= arm_gen_constant (AND
, mode
, cond
,
2180 remainder
| shift_mask
,
2181 new_src
, source
, subtargets
, 1);
2186 rtx targ
= subtargets
? NULL_RTX
: target
;
2187 insns
= arm_gen_constant (AND
, mode
, cond
,
2188 remainder
| shift_mask
,
2189 targ
, source
, subtargets
, 0);
2195 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2196 rtx shift
= GEN_INT (clear_sign_bit_copies
);
2198 emit_insn (gen_ashlsi3 (new_src
, source
, shift
));
2199 emit_insn (gen_lshrsi3 (target
, new_src
, shift
));
2205 if (clear_zero_bit_copies
>= 16 && clear_zero_bit_copies
< 24)
2207 HOST_WIDE_INT shift_mask
= (1 << clear_zero_bit_copies
) - 1;
2209 if ((remainder
| shift_mask
) != 0xffffffff)
2213 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2215 insns
= arm_gen_constant (AND
, mode
, cond
,
2216 remainder
| shift_mask
,
2217 new_src
, source
, subtargets
, 1);
2222 rtx targ
= subtargets
? NULL_RTX
: target
;
2224 insns
= arm_gen_constant (AND
, mode
, cond
,
2225 remainder
| shift_mask
,
2226 targ
, source
, subtargets
, 0);
2232 rtx new_src
= subtargets
? gen_reg_rtx (mode
) : target
;
2233 rtx shift
= GEN_INT (clear_zero_bit_copies
);
2235 emit_insn (gen_lshrsi3 (new_src
, source
, shift
));
2236 emit_insn (gen_ashlsi3 (target
, new_src
, shift
));
2248 for (i
= 0; i
< 32; i
++)
2249 if (remainder
& (1 << i
))
2252 if (code
== AND
|| (can_invert
&& num_bits_set
> 16))
2253 remainder
= (~remainder
) & 0xffffffff;
2254 else if (code
== PLUS
&& num_bits_set
> 16)
2255 remainder
= (-remainder
) & 0xffffffff;
  /* Now try and find a way of doing the job in either two or three
     instructions.

     We start by looking for the largest block of zeros that are aligned on
     a 2-bit boundary; we then fill up the temps, wrapping around to the
     top of the word when we drop off the bottom.

     In the worst case this code should produce no more than four insns.  */
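  /* Illustrative sketch (added commentary, not from the original
     sources): the constant is consumed in 8-bit chunks whose starting
     bit positions are even, e.g. for remainder == 0x00ff00ff the two
     chunks give roughly

	 mov	rD, #0xff
	 orr	rD, rD, #0x00ff0000

     and the code below merely chooses where the first chunk starts so
     that the chunk count (and hence the insn count) is minimal.  */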
2270 int best_consecutive_zeros
= 0;
2272 for (i
= 0; i
< 32; i
+= 2)
2274 int consecutive_zeros
= 0;
2276 if (!(remainder
& (3 << i
)))
2278 while ((i
< 32) && !(remainder
& (3 << i
)))
2280 consecutive_zeros
+= 2;
2283 if (consecutive_zeros
> best_consecutive_zeros
)
2285 best_consecutive_zeros
= consecutive_zeros
;
2286 best_start
= i
- consecutive_zeros
;
      /* So long as it won't require any more insns to do so, it's
	 desirable to emit a small constant (in bits 0...9) in the last
	 insn.  This way there is more chance that it can be combined with
	 a later addressing insn to form a pre-indexed load or store
	 operation.  Consider:

		*((volatile int *)0xe0000100) = 1;
		*((volatile int *)0xe0000110) = 2;

	 We want this to wind up as:

		str rB, [rA, #0x100]
		str rB, [rA, #0x110]

	 rather than having to synthesize both large constants from scratch.

	 Therefore, we calculate how many insns would be required to emit
	 the constant starting from `best_start', and also starting from
	 zero (i.e. with bit 31 first to be output).  If `best_start' doesn't
	 yield a shorter sequence, we may as well use zero.  */
      && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
      && (count_insns_for_constant (remainder, 0) <=
	  count_insns_for_constant (remainder, best_start)))
2321 /* Now start emitting the insns. */
2329 if (remainder
& (3 << (i
- 2)))
2334 temp1
= remainder
& ((0x0ff << end
)
2335 | ((i
< end
) ? (0xff >> (32 - end
)) : 0));
2336 remainder
&= ~temp1
;
2340 rtx new_src
, temp1_rtx
;
2342 if (code
== SET
|| code
== MINUS
)
2344 new_src
= (subtargets
? gen_reg_rtx (mode
) : target
);
2345 if (can_invert
&& code
!= MINUS
)
2350 if (remainder
&& subtargets
)
2351 new_src
= gen_reg_rtx (mode
);
2356 else if (can_negate
)
2360 temp1
= trunc_int_for_mode (temp1
, mode
);
2361 temp1_rtx
= GEN_INT (temp1
);
2365 else if (code
== MINUS
)
2366 temp1_rtx
= gen_rtx_MINUS (mode
, temp1_rtx
, source
);
2368 temp1_rtx
= gen_rtx_fmt_ee (code
, mode
, source
, temp1_rtx
);
2370 emit_constant_insn (cond
,
2371 gen_rtx_SET (VOIDmode
, new_src
,
2381 else if (code
== MINUS
)
/* Canonicalize a comparison so that we are more likely to recognize it.
   This can be done for a few constant compares, where we can make the
   immediate value easier to load.  */
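/* Illustrative sketch (added commentary, not from the original sources):
   0xfff is not a valid ARM immediate but 0x1000 is, so a comparison such
   as "x > 0xfff" is rewritten below into the equivalent "x >= 0x1000",
   allowing

	cmp	rX, #0x1000

   instead of first synthesizing 0xfff in a scratch register.  */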
arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
			     rtx *op1)
{
  unsigned HOST_WIDE_INT i = INTVAL (*op1);
  unsigned HOST_WIDE_INT maxval;
  maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE (mode) - 1)) - 1;
	  && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
	  *op1 = GEN_INT (i + 1);
	  return code == GT ? GE : LT;

	  && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
	  *op1 = GEN_INT (i - 1);
	  return code == GE ? GT : LE;

      if (i != ~((unsigned HOST_WIDE_INT) 0)
	  && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
	  *op1 = GEN_INT (i + 1);
	  return code == GTU ? GEU : LTU;

	  && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
	  *op1 = GEN_INT (i - 1);
	  return code == GEU ? GTU : LEU;
/* Define how to find the value returned by a function.  */

arm_function_value (tree type, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp ATTRIBUTE_UNUSED;
  rtx r ATTRIBUTE_UNUSED;

  mode = TYPE_MODE (type);
  /* Promote integer types.  */
  if (INTEGRAL_TYPE_P (type))
    PROMOTE_FUNCTION_MODE (mode, unsignedp, type);

  /* Promote small structs returned in a register to full-word size
     for big-endian AAPCS.  */
  if (arm_return_in_msb (type))
    {
      HOST_WIDE_INT size = int_size_in_bytes (type);
      if (size % UNITS_PER_WORD != 0)
	{
	  size += UNITS_PER_WORD - size % UNITS_PER_WORD;
	  mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
	}
    }

  return LIBCALL_VALUE (mode);
}
2490 /* Determine the amount of memory needed to store the possible return
2491 registers of an untyped call. */
2493 arm_apply_result_size (void)
2499 if (TARGET_HARD_FLOAT_ABI
)
2503 if (TARGET_MAVERICK
)
2506 if (TARGET_IWMMXT_ABI
)
/* Decide whether a type should be returned in memory (true)
   or in a register (false).  This is called by the macro
   RETURN_IN_MEMORY.  */
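/* Illustrative examples (added commentary, not from the original sources):
   under the ATPCS-and-later rule below, "struct s1 { char c[4]; }" occupies
   one word and is returned in r0, while "struct s2 { char c[8]; }" is larger
   than a word and is returned in memory; the older APCS additionally
   requires that a struct be 'integer like' before it can use a register.  */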
2517 arm_return_in_memory (tree type
)
2521 if (!AGGREGATE_TYPE_P (type
) &&
2522 (TREE_CODE (type
) != VECTOR_TYPE
) &&
2523 !(TARGET_AAPCS_BASED
&& TREE_CODE (type
) == COMPLEX_TYPE
))
2524 /* All simple types are returned in registers.
2525 For AAPCS, complex types are treated the same as aggregates. */
2528 size
= int_size_in_bytes (type
);
2530 if (arm_abi
!= ARM_ABI_APCS
)
2532 /* ATPCS and later return aggregate types in memory only if they are
2533 larger than a word (or are variable size). */
2534 return (size
< 0 || size
> UNITS_PER_WORD
);
2537 /* To maximize backwards compatibility with previous versions of gcc,
2538 return vectors up to 4 words in registers. */
2539 if (TREE_CODE (type
) == VECTOR_TYPE
)
2540 return (size
< 0 || size
> (4 * UNITS_PER_WORD
));
2542 /* For the arm-wince targets we choose to be compatible with Microsoft's
2543 ARM and Thumb compilers, which always return aggregates in memory. */
2545 /* All structures/unions bigger than one word are returned in memory.
2546 Also catch the case where int_size_in_bytes returns -1. In this case
2547 the aggregate is either huge or of variable size, and in either case
2548 we will want to return it via memory and not in a register. */
2549 if (size
< 0 || size
> UNITS_PER_WORD
)
2552 if (TREE_CODE (type
) == RECORD_TYPE
)
      /* For a struct the APCS says that we only return in a register
	 if the type is 'integer like' and every addressable element
	 has an offset of zero.  For practical purposes this means
	 that the structure can have at most one non bit-field element
	 and that this element must be the first one in the structure.  */
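      /* For example (added commentary, not from the original sources):

	     struct ok  { int x; };
	     struct bad { short lo; short hi; };

	 both fit in a word, but only "struct ok" is 'integer like':
	 "struct bad" has a second addressable (non bit-field) member,
	 so under the APCS it is returned in memory.  */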
2562 /* Find the first field, ignoring non FIELD_DECL things which will
2563 have been created by C++. */
2564 for (field
= TYPE_FIELDS (type
);
2565 field
&& TREE_CODE (field
) != FIELD_DECL
;
2566 field
= TREE_CHAIN (field
))
2570 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2572 /* Check that the first field is valid for returning in a register. */
      /* ... Floats are not allowed.  */
2575 if (FLOAT_TYPE_P (TREE_TYPE (field
)))
2578 /* ... Aggregates that are not themselves valid for returning in
2579 a register are not allowed. */
2580 if (RETURN_IN_MEMORY (TREE_TYPE (field
)))
2583 /* Now check the remaining fields, if any. Only bitfields are allowed,
2584 since they are not addressable. */
2585 for (field
= TREE_CHAIN (field
);
2587 field
= TREE_CHAIN (field
))
2589 if (TREE_CODE (field
) != FIELD_DECL
)
2592 if (!DECL_BIT_FIELD_TYPE (field
))
2599 if (TREE_CODE (type
) == UNION_TYPE
)
2603 /* Unions can be returned in registers if every element is
2604 integral, or can be returned in an integer register. */
2605 for (field
= TYPE_FIELDS (type
);
2607 field
= TREE_CHAIN (field
))
2609 if (TREE_CODE (field
) != FIELD_DECL
)
2612 if (FLOAT_TYPE_P (TREE_TYPE (field
)))
2615 if (RETURN_IN_MEMORY (TREE_TYPE (field
)))
2621 #endif /* not ARM_WINCE */
2623 /* Return all other types in memory. */
2627 /* Indicate whether or not words of a double are in big-endian order. */
2630 arm_float_words_big_endian (void)
2632 if (TARGET_MAVERICK
)
  /* For FPA, float words are always big-endian.  For VFP, float words
     follow the memory system mode.  */
2644 return (TARGET_BIG_END
? 1 : 0);
2649 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2650 for a call to a function whose data type is FNTYPE.
2651 For a library call, FNTYPE is NULL. */
2653 arm_init_cumulative_args (CUMULATIVE_ARGS
*pcum
, tree fntype
,
2654 rtx libname ATTRIBUTE_UNUSED
,
2655 tree fndecl ATTRIBUTE_UNUSED
)
2657 /* On the ARM, the offset starts at 0. */
2658 pcum
->nregs
= ((fntype
&& aggregate_value_p (TREE_TYPE (fntype
), fntype
)) ? 1 : 0);
2659 pcum
->iwmmxt_nregs
= 0;
2660 pcum
->can_split
= true;
2662 pcum
->call_cookie
= CALL_NORMAL
;
2664 if (TARGET_LONG_CALLS
)
2665 pcum
->call_cookie
= CALL_LONG
;
2667 /* Check for long call/short call attributes. The attributes
2668 override any command line option. */
2671 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype
)))
2672 pcum
->call_cookie
= CALL_SHORT
;
2673 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype
)))
2674 pcum
->call_cookie
= CALL_LONG
;
2677 /* Varargs vectors are treated the same as long long.
2678 named_count avoids having to change the way arm handles 'named' */
2679 pcum
->named_count
= 0;
2682 if (TARGET_REALLY_IWMMXT
&& fntype
)
2686 for (fn_arg
= TYPE_ARG_TYPES (fntype
);
2688 fn_arg
= TREE_CHAIN (fn_arg
))
2689 pcum
->named_count
+= 1;
2691 if (! pcum
->named_count
)
2692 pcum
->named_count
= INT_MAX
;
2697 /* Return true if mode/type need doubleword alignment. */
2699 arm_needs_doubleword_align (enum machine_mode mode
, tree type
)
2701 return (GET_MODE_ALIGNMENT (mode
) > PARM_BOUNDARY
2702 || (type
&& TYPE_ALIGN (type
) > PARM_BOUNDARY
));
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).  */
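/* Worked example (added commentary, not from the original sources): for a
   call such as "f (int a, double d, int b)" on an AAPCS (soft-float) target,
   a goes in r0, d needs doubleword alignment and so skips r1 to occupy the
   even pair r2/r3, and b no longer fits in the four argument registers and
   is pushed on the stack.  */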
2720 arm_function_arg (CUMULATIVE_ARGS
*pcum
, enum machine_mode mode
,
2721 tree type
, int named
)
2725 /* Varargs vectors are treated the same as long long.
2726 named_count avoids having to change the way arm handles 'named' */
2727 if (TARGET_IWMMXT_ABI
2728 && arm_vector_mode_supported_p (mode
)
2729 && pcum
->named_count
> pcum
->nargs
+ 1)
2731 if (pcum
->iwmmxt_nregs
<= 9)
2732 return gen_rtx_REG (mode
, pcum
->iwmmxt_nregs
+ FIRST_IWMMXT_REGNUM
);
2735 pcum
->can_split
= false;
2740 /* Put doubleword aligned quantities in even register pairs. */
2742 && ARM_DOUBLEWORD_ALIGN
2743 && arm_needs_doubleword_align (mode
, type
))
2746 if (mode
== VOIDmode
)
2747 /* Compute operand 2 of the call insn. */
2748 return GEN_INT (pcum
->call_cookie
);
2750 /* Only allow splitting an arg between regs and memory if all preceding
2751 args were allocated to regs. For args passed by reference we only count
2752 the reference pointer. */
2753 if (pcum
->can_split
)
2756 nregs
= ARM_NUM_REGS2 (mode
, type
);
2758 if (!named
|| pcum
->nregs
+ nregs
> NUM_ARG_REGS
)
2761 return gen_rtx_REG (mode
, pcum
->nregs
);
2765 arm_arg_partial_bytes (CUMULATIVE_ARGS
*pcum
, enum machine_mode mode
,
2766 tree type
, bool named ATTRIBUTE_UNUSED
)
2768 int nregs
= pcum
->nregs
;
2770 if (arm_vector_mode_supported_p (mode
))
2773 if (NUM_ARG_REGS
> nregs
2774 && (NUM_ARG_REGS
< nregs
+ ARM_NUM_REGS2 (mode
, type
))
2776 return (NUM_ARG_REGS
- nregs
) * UNITS_PER_WORD
;
2781 /* Variable sized types are passed by reference. This is a GCC
2782 extension to the ARM ABI. */
2785 arm_pass_by_reference (CUMULATIVE_ARGS
*cum ATTRIBUTE_UNUSED
,
2786 enum machine_mode mode ATTRIBUTE_UNUSED
,
2787 tree type
, bool named ATTRIBUTE_UNUSED
)
2789 return type
&& TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
;
2792 /* Encode the current state of the #pragma [no_]long_calls. */
  OFF,		/* No #pragma [no_]long_calls is in effect.  */
  LONG,		/* #pragma long_calls is in effect.  */
  SHORT		/* #pragma no_long_calls is in effect.  */
2800 static arm_pragma_enum arm_pragma_long_calls
= OFF
;
2803 arm_pr_long_calls (struct cpp_reader
* pfile ATTRIBUTE_UNUSED
)
2805 arm_pragma_long_calls
= LONG
;
2809 arm_pr_no_long_calls (struct cpp_reader
* pfile ATTRIBUTE_UNUSED
)
2811 arm_pragma_long_calls
= SHORT
;
2815 arm_pr_long_calls_off (struct cpp_reader
* pfile ATTRIBUTE_UNUSED
)
2817 arm_pragma_long_calls
= OFF
;
2820 /* Table of machine attributes. */
2821 const struct attribute_spec arm_attribute_table
[] =
2823 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  /* Function calls made to this symbol must be done indirectly, because
     it may lie outside of the 26 bit addressing range of a normal function
     call.  */
2827 { "long_call", 0, 0, false, true, true, NULL
},
2828 /* Whereas these functions are always known to reside within the 26 bit
2829 addressing range. */
2830 { "short_call", 0, 0, false, true, true, NULL
},
2831 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2832 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute
},
2833 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute
},
2834 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute
},
2836 /* ARM/PE has three new attributes:
2838 dllexport - for exporting a function/variable that will live in a dll
2839 dllimport - for importing a function/variable from a dll
     Microsoft allows multiple declspecs in one __declspec, separating
     them with spaces.  We do NOT support this.  Instead, use __declspec
     multiple times.  */
2845 { "dllimport", 0, 0, true, false, false, NULL
},
2846 { "dllexport", 0, 0, true, false, false, NULL
},
2847 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute
},
2848 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2849 { "dllimport", 0, 0, false, false, false, handle_dll_attribute
},
2850 { "dllexport", 0, 0, false, false, false, handle_dll_attribute
},
2851 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute
},
2853 { NULL
, 0, 0, false, false, false, NULL
}
2856 /* Handle an attribute requiring a FUNCTION_DECL;
2857 arguments as in struct attribute_spec.handler. */
2859 arm_handle_fndecl_attribute (tree
*node
, tree name
, tree args ATTRIBUTE_UNUSED
,
2860 int flags ATTRIBUTE_UNUSED
, bool *no_add_attrs
)
2862 if (TREE_CODE (*node
) != FUNCTION_DECL
)
2864 warning (OPT_Wattributes
, "%qs attribute only applies to functions",
2865 IDENTIFIER_POINTER (name
));
2866 *no_add_attrs
= true;
2872 /* Handle an "interrupt" or "isr" attribute;
2873 arguments as in struct attribute_spec.handler. */
2875 arm_handle_isr_attribute (tree
*node
, tree name
, tree args
, int flags
,
2880 if (TREE_CODE (*node
) != FUNCTION_DECL
)
2882 warning (OPT_Wattributes
, "%qs attribute only applies to functions",
2883 IDENTIFIER_POINTER (name
));
2884 *no_add_attrs
= true;
2886 /* FIXME: the argument if any is checked for type attributes;
2887 should it be checked for decl ones? */
2891 if (TREE_CODE (*node
) == FUNCTION_TYPE
2892 || TREE_CODE (*node
) == METHOD_TYPE
)
2894 if (arm_isr_value (args
) == ARM_FT_UNKNOWN
)
2896 warning (OPT_Wattributes
, "%qs attribute ignored",
2897 IDENTIFIER_POINTER (name
));
2898 *no_add_attrs
= true;
2901 else if (TREE_CODE (*node
) == POINTER_TYPE
2902 && (TREE_CODE (TREE_TYPE (*node
)) == FUNCTION_TYPE
2903 || TREE_CODE (TREE_TYPE (*node
)) == METHOD_TYPE
)
2904 && arm_isr_value (args
) != ARM_FT_UNKNOWN
)
2906 *node
= build_variant_type_copy (*node
);
2907 TREE_TYPE (*node
) = build_type_attribute_variant
2909 tree_cons (name
, args
, TYPE_ATTRIBUTES (TREE_TYPE (*node
))));
2910 *no_add_attrs
= true;
2914 /* Possibly pass this attribute on from the type to a decl. */
2915 if (flags
& ((int) ATTR_FLAG_DECL_NEXT
2916 | (int) ATTR_FLAG_FUNCTION_NEXT
2917 | (int) ATTR_FLAG_ARRAY_NEXT
))
2919 *no_add_attrs
= true;
2920 return tree_cons (name
, args
, NULL_TREE
);
2924 warning (OPT_Wattributes
, "%qs attribute ignored",
2925 IDENTIFIER_POINTER (name
));
2933 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
/* Handle the "notshared" attribute.  This attribute is another way of
   requesting hidden visibility.  ARM's compiler supports
   "__declspec(notshared)"; we support the same thing via an
   attribute.  */
2940 arm_handle_notshared_attribute (tree
*node
,
2941 tree name ATTRIBUTE_UNUSED
,
2942 tree args ATTRIBUTE_UNUSED
,
2943 int flags ATTRIBUTE_UNUSED
,
2946 tree decl
= TYPE_NAME (*node
);
2950 DECL_VISIBILITY (decl
) = VISIBILITY_HIDDEN
;
2951 DECL_VISIBILITY_SPECIFIED (decl
) = 1;
2952 *no_add_attrs
= false;
2958 /* Return 0 if the attributes for two types are incompatible, 1 if they
2959 are compatible, and 2 if they are nearly compatible (which causes a
2960 warning to be generated). */
2962 arm_comp_type_attributes (tree type1
, tree type2
)
2966 /* Check for mismatch of non-default calling convention. */
2967 if (TREE_CODE (type1
) != FUNCTION_TYPE
)
2970 /* Check for mismatched call attributes. */
2971 l1
= lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1
)) != NULL
;
2972 l2
= lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2
)) != NULL
;
2973 s1
= lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1
)) != NULL
;
2974 s2
= lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2
)) != NULL
;
2976 /* Only bother to check if an attribute is defined. */
2977 if (l1
| l2
| s1
| s2
)
2979 /* If one type has an attribute, the other must have the same attribute. */
2980 if ((l1
!= l2
) || (s1
!= s2
))
2983 /* Disallow mixed attributes. */
2984 if ((l1
& s2
) || (l2
& s1
))
2988 /* Check for mismatched ISR attribute. */
2989 l1
= lookup_attribute ("isr", TYPE_ATTRIBUTES (type1
)) != NULL
;
2991 l1
= lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1
)) != NULL
;
2992 l2
= lookup_attribute ("isr", TYPE_ATTRIBUTES (type2
)) != NULL
;
2994 l1
= lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2
)) != NULL
;
3001 /* Encode long_call or short_call attribute by prefixing
3002 symbol name in DECL with a special character FLAG. */
3004 arm_encode_call_attribute (tree decl
, int flag
)
3006 const char * str
= XSTR (XEXP (DECL_RTL (decl
), 0), 0);
3007 int len
= strlen (str
);
3010 /* Do not allow weak functions to be treated as short call. */
3011 if (DECL_WEAK (decl
) && flag
== SHORT_CALL_FLAG_CHAR
)
3014 newstr
= alloca (len
+ 2);
3016 strcpy (newstr
+ 1, str
);
3018 newstr
= (char *) ggc_alloc_string (newstr
, len
+ 1);
3019 XSTR (XEXP (DECL_RTL (decl
), 0), 0) = newstr
;
3022 /* Assigns default attributes to newly defined type. This is used to
3023 set short_call/long_call attributes for function types of
3024 functions defined inside corresponding #pragma scopes. */
3026 arm_set_default_type_attributes (tree type
)
  /* Add __attribute__ ((long_call)) to all functions when
     inside #pragma long_calls, or __attribute__ ((short_call))
     when inside #pragma no_long_calls.  */
3031 if (TREE_CODE (type
) == FUNCTION_TYPE
|| TREE_CODE (type
) == METHOD_TYPE
)
3033 tree type_attr_list
, attr_name
;
3034 type_attr_list
= TYPE_ATTRIBUTES (type
);
3036 if (arm_pragma_long_calls
== LONG
)
3037 attr_name
= get_identifier ("long_call");
3038 else if (arm_pragma_long_calls
== SHORT
)
3039 attr_name
= get_identifier ("short_call");
3043 type_attr_list
= tree_cons (attr_name
, NULL_TREE
, type_attr_list
);
3044 TYPE_ATTRIBUTES (type
) = type_attr_list
;
3048 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3049 defined within the current compilation unit. If this cannot be
3050 determined, then 0 is returned. */
3052 current_file_function_operand (rtx sym_ref
)
3054 /* This is a bit of a fib. A function will have a short call flag
3055 applied to its name if it has the short call attribute, or it has
3056 already been defined within the current compilation unit. */
3057 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref
, 0)))
  /* The current function is always defined within the current compilation
     unit.  If it is a weak definition, however, then this may not be the
     real definition of the function, and so we have to say no.  */
3063 if (sym_ref
== XEXP (DECL_RTL (current_function_decl
), 0)
3064 && !DECL_WEAK (current_function_decl
))
3067 /* We cannot make the determination - default to returning 0. */
3071 /* Return nonzero if a 32 bit "long_call" should be generated for
   this call.  We generate a long_call if the function:

        a.  has an __attribute__ ((long_call))
3075 or b. is within the scope of a #pragma long_calls
3076 or c. the -mlong-calls command line switch has been specified
3078 1. -ffunction-sections is in effect
3079 or 2. the current function has __attribute__ ((section))
3080 or 3. the target function has __attribute__ ((section))
3082 However we do not generate a long call if the function:
3084 d. has an __attribute__ ((short_call))
3085 or e. is inside the scope of a #pragma no_long_calls
3086 or f. is defined within the current compilation unit.
3088 This function will be called by C fragments contained in the machine
3089 description file. SYM_REF and CALL_COOKIE correspond to the matched
3090 rtl operands. CALL_SYMBOL is used to distinguish between
3091 two different callers of the function. It is set to 1 in the
3092 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3093 and "call_value" patterns. This is because of the difference in the
3094 SYM_REFs passed by these patterns. */
3096 arm_is_longcall_p (rtx sym_ref
, int call_cookie
, int call_symbol
)
3100 if (GET_CODE (sym_ref
) != MEM
)
3103 sym_ref
= XEXP (sym_ref
, 0);
3106 if (GET_CODE (sym_ref
) != SYMBOL_REF
)
3109 if (call_cookie
& CALL_SHORT
)
3112 if (TARGET_LONG_CALLS
)
3114 if (flag_function_sections
3115 || DECL_SECTION_NAME (current_function_decl
))
3116 /* c.3 is handled by the definition of the
3117 ARM_DECLARE_FUNCTION_SIZE macro. */
3121 if (current_file_function_operand (sym_ref
))
3124 return (call_cookie
& CALL_LONG
)
3125 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref
, 0))
3126 || TARGET_LONG_CALLS
;
3129 /* Return nonzero if it is ok to make a tail-call to DECL. */
3131 arm_function_ok_for_sibcall (tree decl
, tree exp ATTRIBUTE_UNUSED
)
3133 int call_type
= TARGET_LONG_CALLS
? CALL_LONG
: CALL_NORMAL
;
3135 if (cfun
->machine
->sibcall_blocked
)
3138 /* Never tailcall something for which we have no decl, or if we
3139 are in Thumb mode. */
3140 if (decl
== NULL
|| TARGET_THUMB
)
3143 /* Get the calling method. */
3144 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl
))))
3145 call_type
= CALL_SHORT
;
3146 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl
))))
3147 call_type
= CALL_LONG
;
3149 /* Cannot tail-call to long calls, since these are out of range of
3150 a branch instruction. However, if not compiling PIC, we know
3151 we can reach the symbol if it is in this compilation unit. */
3152 if (call_type
== CALL_LONG
&& (flag_pic
|| !TREE_ASM_WRITTEN (decl
)))
3155 /* If we are interworking and the function is not declared static
3156 then we can't tail-call it unless we know that it exists in this
3157 compilation unit (since it might be a Thumb routine). */
3158 if (TARGET_INTERWORK
&& TREE_PUBLIC (decl
) && !TREE_ASM_WRITTEN (decl
))
3161 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3162 if (IS_INTERRUPT (arm_current_func_type ()))
3165 /* Everything else is ok. */
3170 /* Addressing mode support functions. */
3172 /* Return nonzero if X is a legitimate immediate operand when compiling
3175 legitimate_pic_operand_p (rtx x
)
3179 && (GET_CODE (x
) == SYMBOL_REF
3180 || (GET_CODE (x
) == CONST
3181 && GET_CODE (XEXP (x
, 0)) == PLUS
3182 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
)))
3189 legitimize_pic_address (rtx orig
, enum machine_mode mode
, rtx reg
)
3191 if (GET_CODE (orig
) == SYMBOL_REF
3192 || GET_CODE (orig
) == LABEL_REF
)
3194 #ifndef AOF_ASSEMBLER
3195 rtx pic_ref
, address
;
3202 gcc_assert (!no_new_pseudos
);
3203 reg
= gen_reg_rtx (Pmode
);
3208 #ifdef AOF_ASSEMBLER
3209 /* The AOF assembler can generate relocations for these directly, and
3210 understands that the PIC register has to be added into the offset. */
3211 insn
= emit_insn (gen_pic_load_addr_based (reg
, orig
));
3214 address
= gen_reg_rtx (Pmode
);
3219 emit_insn (gen_pic_load_addr_arm (address
, orig
));
3221 emit_insn (gen_pic_load_addr_thumb (address
, orig
));
3223 if ((GET_CODE (orig
) == LABEL_REF
3224 || (GET_CODE (orig
) == SYMBOL_REF
&&
3225 SYMBOL_REF_LOCAL_P (orig
)))
3227 pic_ref
= gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
, address
);
3230 pic_ref
= gen_const_mem (Pmode
,
3231 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
3235 insn
= emit_move_insn (reg
, pic_ref
);
3237 current_function_uses_pic_offset_table
= 1;
3238 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3240 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_EQUAL
, orig
,
3244 else if (GET_CODE (orig
) == CONST
)
3248 if (GET_CODE (XEXP (orig
, 0)) == PLUS
3249 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
3252 if (GET_CODE (XEXP (orig
, 0)) == UNSPEC
3253 && XINT (XEXP (orig
, 0), 1) == UNSPEC_TLS
)
3258 gcc_assert (!no_new_pseudos
);
3259 reg
= gen_reg_rtx (Pmode
);
3262 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
3264 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
3265 offset
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
3266 base
== reg
? 0 : reg
);
3268 if (GET_CODE (offset
) == CONST_INT
)
3270 /* The base register doesn't really matter, we only want to
3271 test the index for the appropriate mode. */
3272 if (!arm_legitimate_index_p (mode
, offset
, SET
, 0))
3274 gcc_assert (!no_new_pseudos
);
3275 offset
= force_reg (Pmode
, offset
);
3278 if (GET_CODE (offset
) == CONST_INT
)
3279 return plus_constant (base
, INTVAL (offset
));
3282 if (GET_MODE_SIZE (mode
) > 4
3283 && (GET_MODE_CLASS (mode
) == MODE_INT
3284 || TARGET_SOFT_FLOAT
))
3286 emit_insn (gen_addsi3 (reg
, base
, offset
));
3290 return gen_rtx_PLUS (Pmode
, base
, offset
);
3297 /* Find a spare low register to use during the prolog of a function. */
3300 thumb_find_work_register (unsigned long pushed_regs_mask
)
3304 /* Check the argument registers first as these are call-used. The
3305 register allocation order means that sometimes r3 might be used
3306 but earlier argument registers might not, so check them all. */
3307 for (reg
= LAST_ARG_REGNUM
; reg
>= 0; reg
--)
3308 if (!regs_ever_live
[reg
])
3311 /* Before going on to check the call-saved registers we can try a couple
3312 more ways of deducing that r3 is available. The first is when we are
3313 pushing anonymous arguments onto the stack and we have less than 4
3314 registers worth of fixed arguments(*). In this case r3 will be part of
3315 the variable argument list and so we can be sure that it will be
3316 pushed right at the start of the function. Hence it will be available
3317 for the rest of the prologue.
     (*): i.e. current_function_pretend_args_size is greater than 0.  */
3319 if (cfun
->machine
->uses_anonymous_args
3320 && current_function_pretend_args_size
> 0)
3321 return LAST_ARG_REGNUM
;
3323 /* The other case is when we have fixed arguments but less than 4 registers
3324 worth. In this case r3 might be used in the body of the function, but
3325 it is not being used to convey an argument into the function. In theory
3326 we could just check current_function_args_size to see how many bytes are
3327 being passed in argument registers, but it seems that it is unreliable.
3328 Sometimes it will have the value 0 when in fact arguments are being
3329 passed. (See testcase execute/20021111-1.c for an example). So we also
3330 check the args_info.nregs field as well. The problem with this field is
3331 that it makes no allowances for arguments that are passed to the
3332 function but which are not used. Hence we could miss an opportunity
3333 when a function has an unused argument in r3. But it is better to be
3334 safe than to be sorry. */
3335 if (! cfun
->machine
->uses_anonymous_args
3336 && current_function_args_size
>= 0
3337 && current_function_args_size
<= (LAST_ARG_REGNUM
* UNITS_PER_WORD
)
3338 && cfun
->args_info
.nregs
< 4)
3339 return LAST_ARG_REGNUM
;
3341 /* Otherwise look for a call-saved register that is going to be pushed. */
3342 for (reg
= LAST_LO_REGNUM
; reg
> LAST_ARG_REGNUM
; reg
--)
3343 if (pushed_regs_mask
& (1 << reg
))
3346 /* Something went wrong - thumb_compute_save_reg_mask()
3347 should have arranged for a suitable register to be pushed. */
3351 static GTY(()) int pic_labelno
;
3353 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3357 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED
)
3359 #ifndef AOF_ASSEMBLER
3360 rtx l1
, labelno
, pic_tmp
, pic_tmp2
, pic_rtx
;
3361 rtx global_offset_table
;
3363 if (current_function_uses_pic_offset_table
== 0 || TARGET_SINGLE_PIC_BASE
)
3366 gcc_assert (flag_pic
);
3368 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3369 in the code stream. */
3371 labelno
= GEN_INT (pic_labelno
++);
3372 l1
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, labelno
), UNSPEC_PIC_LABEL
);
3373 l1
= gen_rtx_CONST (VOIDmode
, l1
);
3375 global_offset_table
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
  /* On the ARM the PC register contains 'dot + 8' at the time of the
     addition, on the Thumb it is 'dot + 4'.  */
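  /* Rough shape of the ARM-state sequence this builds (added commentary,
     not from the original sources; register and label names are made up):

	     ldr	r10, .Loffset	@ GOT - (.LPIC0 + 8)
     .LPIC0: add	r10, pc, r10	@ pc reads as .LPIC0 + 8 here

     so after the add, r10 holds the address of the GOT regardless of
     where the code was loaded.  */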
3378 pic_tmp
= plus_constant (l1
, TARGET_ARM
? 8 : 4);
3380 pic_tmp2
= gen_rtx_CONST (VOIDmode
,
3381 gen_rtx_PLUS (Pmode
, global_offset_table
, pc_rtx
));
3383 pic_tmp2
= gen_rtx_CONST (VOIDmode
, global_offset_table
);
3385 pic_rtx
= gen_rtx_CONST (Pmode
, gen_rtx_MINUS (Pmode
, pic_tmp2
, pic_tmp
));
3389 emit_insn (gen_pic_load_addr_arm (pic_offset_table_rtx
, pic_rtx
));
3390 emit_insn (gen_pic_add_dot_plus_eight (pic_offset_table_rtx
,
3391 pic_offset_table_rtx
, labelno
));
3395 if (REGNO (pic_offset_table_rtx
) > LAST_LO_REGNUM
)
3397 /* We will have pushed the pic register, so we should always be
3398 able to find a work register. */
3399 pic_tmp
= gen_rtx_REG (SImode
,
3400 thumb_find_work_register (saved_regs
));
3401 emit_insn (gen_pic_load_addr_thumb (pic_tmp
, pic_rtx
));
3402 emit_insn (gen_movsi (pic_offset_table_rtx
, pic_tmp
));
3405 emit_insn (gen_pic_load_addr_thumb (pic_offset_table_rtx
, pic_rtx
));
3406 emit_insn (gen_pic_add_dot_plus_four (pic_offset_table_rtx
,
3407 pic_offset_table_rtx
, labelno
));
3410 /* Need to emit this whether or not we obey regdecls,
3411 since setjmp/longjmp can cause life info to screw up. */
3412 emit_insn (gen_rtx_USE (VOIDmode
, pic_offset_table_rtx
));
3413 #endif /* AOF_ASSEMBLER */
3417 /* Return nonzero if X is valid as an ARM state addressing register. */
3419 arm_address_register_rtx_p (rtx x
, int strict_p
)
3423 if (GET_CODE (x
) != REG
)
3429 return ARM_REGNO_OK_FOR_BASE_P (regno
);
3431 return (regno
<= LAST_ARM_REGNUM
3432 || regno
>= FIRST_PSEUDO_REGISTER
3433 || regno
== FRAME_POINTER_REGNUM
3434 || regno
== ARG_POINTER_REGNUM
);
3437 /* Return TRUE if this rtx is the difference of a symbol and a label,
3438 and will reduce to a PC-relative relocation in the object file.
3439 Expressions like this can be left alone when generating PIC, rather
3440 than forced through the GOT. */
3442 pcrel_constant_p (rtx x
)
3444 if (GET_CODE (x
) == MINUS
)
3445 return symbol_mentioned_p (XEXP (x
, 0)) && label_mentioned_p (XEXP (x
, 1));
3450 /* Return nonzero if X is a valid ARM state address operand. */
3452 arm_legitimate_address_p (enum machine_mode mode
, rtx x
, RTX_CODE outer
,
3456 enum rtx_code code
= GET_CODE (x
);
3458 if (arm_address_register_rtx_p (x
, strict_p
))
3461 use_ldrd
= (TARGET_LDRD
3463 || (mode
== DFmode
&& (TARGET_SOFT_FLOAT
|| TARGET_VFP
))));
3465 if (code
== POST_INC
|| code
== PRE_DEC
3466 || ((code
== PRE_INC
|| code
== POST_DEC
)
3467 && (use_ldrd
|| GET_MODE_SIZE (mode
) <= 4)))
3468 return arm_address_register_rtx_p (XEXP (x
, 0), strict_p
);
3470 else if ((code
== POST_MODIFY
|| code
== PRE_MODIFY
)
3471 && arm_address_register_rtx_p (XEXP (x
, 0), strict_p
)
3472 && GET_CODE (XEXP (x
, 1)) == PLUS
3473 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
3475 rtx addend
= XEXP (XEXP (x
, 1), 1);
3477 /* Don't allow ldrd post increment by register because it's hard
3478 to fixup invalid register choices. */
3480 && GET_CODE (x
) == POST_MODIFY
3481 && GET_CODE (addend
) == REG
)
3484 return ((use_ldrd
|| GET_MODE_SIZE (mode
) <= 4)
3485 && arm_legitimate_index_p (mode
, addend
, outer
, strict_p
));
3488 /* After reload constants split into minipools will have addresses
3489 from a LABEL_REF. */
3490 else if (reload_completed
3491 && (code
== LABEL_REF
3493 && GET_CODE (XEXP (x
, 0)) == PLUS
3494 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == LABEL_REF
3495 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
3498 else if (mode
== TImode
)
3501 else if (code
== PLUS
)
3503 rtx xop0
= XEXP (x
, 0);
3504 rtx xop1
= XEXP (x
, 1);
3506 return ((arm_address_register_rtx_p (xop0
, strict_p
)
3507 && arm_legitimate_index_p (mode
, xop1
, outer
, strict_p
))
3508 || (arm_address_register_rtx_p (xop1
, strict_p
)
3509 && arm_legitimate_index_p (mode
, xop0
, outer
, strict_p
)));
3513 /* Reload currently can't handle MINUS, so disable this for now */
3514 else if (GET_CODE (x
) == MINUS
)
3516 rtx xop0
= XEXP (x
, 0);
3517 rtx xop1
= XEXP (x
, 1);
3519 return (arm_address_register_rtx_p (xop0
, strict_p
)
3520 && arm_legitimate_index_p (mode
, xop1
, outer
, strict_p
));
3524 else if (GET_MODE_CLASS (mode
) != MODE_FLOAT
3525 && code
== SYMBOL_REF
3526 && CONSTANT_POOL_ADDRESS_P (x
)
3528 && symbol_mentioned_p (get_pool_constant (x
))
3529 && ! pcrel_constant_p (get_pool_constant (x
))))
3535 /* Return nonzero if INDEX is valid for an address index operand in
3538 arm_legitimate_index_p (enum machine_mode mode
, rtx index
, RTX_CODE outer
,
3541 HOST_WIDE_INT range
;
3542 enum rtx_code code
= GET_CODE (index
);
3544 /* Standard coprocessor addressing modes. */
3545 if (TARGET_HARD_FLOAT
3546 && (TARGET_FPA
|| TARGET_MAVERICK
)
3547 && (GET_MODE_CLASS (mode
) == MODE_FLOAT
3548 || (TARGET_MAVERICK
&& mode
== DImode
)))
3549 return (code
== CONST_INT
&& INTVAL (index
) < 1024
3550 && INTVAL (index
) > -1024
3551 && (INTVAL (index
) & 3) == 0);
3553 if (TARGET_REALLY_IWMMXT
&& VALID_IWMMXT_REG_MODE (mode
))
3554 return (code
== CONST_INT
3555 && INTVAL (index
) < 1024
3556 && INTVAL (index
) > -1024
3557 && (INTVAL (index
) & 3) == 0);
3559 if (arm_address_register_rtx_p (index
, strict_p
)
3560 && (GET_MODE_SIZE (mode
) <= 4))
3563 if (mode
== DImode
|| mode
== DFmode
)
3565 if (code
== CONST_INT
)
3567 HOST_WIDE_INT val
= INTVAL (index
);
3570 return val
> -256 && val
< 256;
3572 return val
> -4096 && val
< 4092;
3575 return TARGET_LDRD
&& arm_address_register_rtx_p (index
, strict_p
);
3578 if (GET_MODE_SIZE (mode
) <= 4
3581 || (mode
== QImode
&& outer
== SIGN_EXTEND
))))
3585 rtx xiop0
= XEXP (index
, 0);
3586 rtx xiop1
= XEXP (index
, 1);
3588 return ((arm_address_register_rtx_p (xiop0
, strict_p
)
3589 && power_of_two_operand (xiop1
, SImode
))
3590 || (arm_address_register_rtx_p (xiop1
, strict_p
)
3591 && power_of_two_operand (xiop0
, SImode
)));
3593 else if (code
== LSHIFTRT
|| code
== ASHIFTRT
3594 || code
== ASHIFT
|| code
== ROTATERT
)
3596 rtx op
= XEXP (index
, 1);
3598 return (arm_address_register_rtx_p (XEXP (index
, 0), strict_p
)
3599 && GET_CODE (op
) == CONST_INT
3601 && INTVAL (op
) <= 31);
3605 /* For ARM v4 we may be doing a sign-extend operation during the
3609 if (mode
== HImode
|| (outer
== SIGN_EXTEND
&& mode
== QImode
))
3615 range
= (mode
== HImode
) ? 4095 : 4096;
3617 return (code
== CONST_INT
3618 && INTVAL (index
) < range
3619 && INTVAL (index
) > -range
);
3622 /* Return nonzero if X is valid as a Thumb state base register. */
3624 thumb_base_register_rtx_p (rtx x
, enum machine_mode mode
, int strict_p
)
3628 if (GET_CODE (x
) != REG
)
3634 return THUMB_REGNO_MODE_OK_FOR_BASE_P (regno
, mode
);
3636 return (regno
<= LAST_LO_REGNUM
3637 || regno
> LAST_VIRTUAL_REGISTER
3638 || regno
== FRAME_POINTER_REGNUM
3639 || (GET_MODE_SIZE (mode
) >= 4
3640 && (regno
== STACK_POINTER_REGNUM
3641 || regno
>= FIRST_PSEUDO_REGISTER
3642 || x
== hard_frame_pointer_rtx
3643 || x
== arg_pointer_rtx
)));
3646 /* Return nonzero if x is a legitimate index register. This is the case
3647 for any base register that can access a QImode object. */
3649 thumb_index_register_rtx_p (rtx x
, int strict_p
)
3651 return thumb_base_register_rtx_p (x
, QImode
, strict_p
);
3654 /* Return nonzero if x is a legitimate Thumb-state address.
3656 The AP may be eliminated to either the SP or the FP, so we use the
3657 least common denominator, e.g. SImode, and offsets from 0 to 64.
3659 ??? Verify whether the above is the right approach.
3661 ??? Also, the FP may be eliminated to the SP, so perhaps that
3662 needs special handling also.
3664 ??? Look at how the mips16 port solves this problem. It probably uses
3665 better ways to solve some of these problems.
3667 Although it is not incorrect, we don't accept QImode and HImode
3668 addresses based on the frame pointer or arg pointer until the
3669 reload pass starts. This is so that eliminating such addresses
3670 into stack based ones won't produce impossible code. */
3672 thumb_legitimate_address_p (enum machine_mode mode
, rtx x
, int strict_p
)
3674 /* ??? Not clear if this is right. Experiment. */
3675 if (GET_MODE_SIZE (mode
) < 4
3676 && !(reload_in_progress
|| reload_completed
)
3677 && (reg_mentioned_p (frame_pointer_rtx
, x
)
3678 || reg_mentioned_p (arg_pointer_rtx
, x
)
3679 || reg_mentioned_p (virtual_incoming_args_rtx
, x
)
3680 || reg_mentioned_p (virtual_outgoing_args_rtx
, x
)
3681 || reg_mentioned_p (virtual_stack_dynamic_rtx
, x
)
3682 || reg_mentioned_p (virtual_stack_vars_rtx
, x
)))
3685 /* Accept any base register. SP only in SImode or larger. */
3686 else if (thumb_base_register_rtx_p (x
, mode
, strict_p
))
3689 /* This is PC relative data before arm_reorg runs. */
3690 else if (GET_MODE_SIZE (mode
) >= 4 && CONSTANT_P (x
)
3691 && GET_CODE (x
) == SYMBOL_REF
3692 && CONSTANT_POOL_ADDRESS_P (x
) && ! flag_pic
)
3695 /* This is PC relative data after arm_reorg runs. */
3696 else if (GET_MODE_SIZE (mode
) >= 4 && reload_completed
3697 && (GET_CODE (x
) == LABEL_REF
3698 || (GET_CODE (x
) == CONST
3699 && GET_CODE (XEXP (x
, 0)) == PLUS
3700 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == LABEL_REF
3701 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
)))
3704 /* Post-inc indexing only supported for SImode and larger. */
3705 else if (GET_CODE (x
) == POST_INC
&& GET_MODE_SIZE (mode
) >= 4
3706 && thumb_index_register_rtx_p (XEXP (x
, 0), strict_p
))
3709 else if (GET_CODE (x
) == PLUS
)
3711 /* REG+REG address can be any two index registers. */
3712 /* We disallow FRAME+REG addressing since we know that FRAME
3713 will be replaced with STACK, and SP relative addressing only
3714 permits SP+OFFSET. */
3715 if (GET_MODE_SIZE (mode
) <= 4
3716 && XEXP (x
, 0) != frame_pointer_rtx
3717 && XEXP (x
, 1) != frame_pointer_rtx
3718 && thumb_index_register_rtx_p (XEXP (x
, 0), strict_p
)
3719 && thumb_index_register_rtx_p (XEXP (x
, 1), strict_p
))
3722 /* REG+const has 5-7 bit offset for non-SP registers. */
3723 else if ((thumb_index_register_rtx_p (XEXP (x
, 0), strict_p
)
3724 || XEXP (x
, 0) == arg_pointer_rtx
)
3725 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3726 && thumb_legitimate_offset_p (mode
, INTVAL (XEXP (x
, 1))))
3729 /* REG+const has 10 bit offset for SP, but only SImode and
3730 larger is supported. */
3731 /* ??? Should probably check for DI/DFmode overflow here
3732 just like GO_IF_LEGITIMATE_OFFSET does. */
3733 else if (GET_CODE (XEXP (x
, 0)) == REG
3734 && REGNO (XEXP (x
, 0)) == STACK_POINTER_REGNUM
3735 && GET_MODE_SIZE (mode
) >= 4
3736 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3737 && INTVAL (XEXP (x
, 1)) >= 0
3738 && INTVAL (XEXP (x
, 1)) + GET_MODE_SIZE (mode
) <= 1024
3739 && (INTVAL (XEXP (x
, 1)) & 3) == 0)
3742 else if (GET_CODE (XEXP (x
, 0)) == REG
3743 && REGNO (XEXP (x
, 0)) == FRAME_POINTER_REGNUM
3744 && GET_MODE_SIZE (mode
) >= 4
3745 && GET_CODE (XEXP (x
, 1)) == CONST_INT
3746 && (INTVAL (XEXP (x
, 1)) & 3) == 0)
3750 else if (GET_MODE_CLASS (mode
) != MODE_FLOAT
3751 && GET_MODE_SIZE (mode
) == 4
3752 && GET_CODE (x
) == SYMBOL_REF
3753 && CONSTANT_POOL_ADDRESS_P (x
)
3755 && symbol_mentioned_p (get_pool_constant (x
))
3756 && ! pcrel_constant_p (get_pool_constant (x
))))
3762 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
3763 instruction of mode MODE. */
3765 thumb_legitimate_offset_p (enum machine_mode mode
, HOST_WIDE_INT val
)
3767 switch (GET_MODE_SIZE (mode
))
3770 return val
>= 0 && val
< 32;
3773 return val
>= 0 && val
< 64 && (val
& 1) == 0;
3777 && (val
+ GET_MODE_SIZE (mode
)) <= 128
3782 /* Build the SYMBOL_REF for __tls_get_addr. */
3784 static GTY(()) rtx tls_get_addr_libfunc
;
3787 get_tls_get_addr (void)
3789 if (!tls_get_addr_libfunc
)
3790 tls_get_addr_libfunc
= init_one_libfunc ("__tls_get_addr");
3791 return tls_get_addr_libfunc
;
3795 arm_load_tp (rtx target
)
3798 target
= gen_reg_rtx (SImode
);
3802 /* Can return in any reg. */
3803 emit_insn (gen_load_tp_hard (target
));
3807 /* Always returned in r0. Immediately copy the result into a pseudo,
3808 otherwise other uses of r0 (e.g. setting up function arguments) may
3809 clobber the value. */
3813 emit_insn (gen_load_tp_soft ());
3815 tmp
= gen_rtx_REG (SImode
, 0);
3816 emit_move_insn (target
, tmp
);
3822 load_tls_operand (rtx x
, rtx reg
)
3826 if (reg
== NULL_RTX
)
3827 reg
= gen_reg_rtx (SImode
);
3829 tmp
= gen_rtx_CONST (SImode
, x
);
3831 emit_move_insn (reg
, tmp
);
3837 arm_call_tls_get_addr (rtx x
, rtx reg
, rtx
*valuep
, int reloc
)
3839 rtx insns
, label
, labelno
, sum
;
3843 labelno
= GEN_INT (pic_labelno
++);
3844 label
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, labelno
), UNSPEC_PIC_LABEL
);
3845 label
= gen_rtx_CONST (VOIDmode
, label
);
3847 sum
= gen_rtx_UNSPEC (Pmode
,
3848 gen_rtvec (4, x
, GEN_INT (reloc
), label
,
3849 GEN_INT (TARGET_ARM
? 8 : 4)),
3851 reg
= load_tls_operand (sum
, reg
);
3854 emit_insn (gen_pic_add_dot_plus_eight (reg
, reg
, labelno
));
3856 emit_insn (gen_pic_add_dot_plus_four (reg
, reg
, labelno
));
3858 *valuep
= emit_library_call_value (get_tls_get_addr (), NULL_RTX
, LCT_PURE
, /* LCT_CONST? */
3859 Pmode
, 1, reg
, Pmode
);
3861 insns
= get_insns ();
3868 legitimize_tls_address (rtx x
, rtx reg
)
3870 rtx dest
, tp
, label
, labelno
, sum
, insns
, ret
, eqv
, addend
;
3871 unsigned int model
= SYMBOL_REF_TLS_MODEL (x
);
3875 case TLS_MODEL_GLOBAL_DYNAMIC
:
3876 insns
= arm_call_tls_get_addr (x
, reg
, &ret
, TLS_GD32
);
3877 dest
= gen_reg_rtx (Pmode
);
3878 emit_libcall_block (insns
, dest
, ret
, x
);
3881 case TLS_MODEL_LOCAL_DYNAMIC
:
3882 insns
= arm_call_tls_get_addr (x
, reg
, &ret
, TLS_LDM32
);
3884 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
3885 share the LDM result with other LD model accesses. */
3886 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const1_rtx
),
3888 dest
= gen_reg_rtx (Pmode
);
3889 emit_libcall_block (insns
, dest
, ret
, eqv
);
3891 /* Load the addend. */
3892 addend
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (TLS_LDO32
)),
3894 addend
= force_reg (SImode
, gen_rtx_CONST (SImode
, addend
));
3895 return gen_rtx_PLUS (Pmode
, dest
, addend
);
3897 case TLS_MODEL_INITIAL_EXEC
:
3898 labelno
= GEN_INT (pic_labelno
++);
3899 label
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, labelno
), UNSPEC_PIC_LABEL
);
3900 label
= gen_rtx_CONST (VOIDmode
, label
);
3901 sum
= gen_rtx_UNSPEC (Pmode
,
3902 gen_rtvec (4, x
, GEN_INT (TLS_IE32
), label
,
3903 GEN_INT (TARGET_ARM
? 8 : 4)),
3905 reg
= load_tls_operand (sum
, reg
);
3908 emit_insn (gen_tls_load_dot_plus_eight (reg
, reg
, labelno
));
3911 emit_insn (gen_pic_add_dot_plus_four (reg
, reg
, labelno
));
3912 emit_move_insn (reg
, gen_const_mem (SImode
, reg
));
3915 tp
= arm_load_tp (NULL_RTX
);
3917 return gen_rtx_PLUS (Pmode
, tp
, reg
);
3919 case TLS_MODEL_LOCAL_EXEC
:
3920 tp
= arm_load_tp (NULL_RTX
);
3922 reg
= gen_rtx_UNSPEC (Pmode
,
3923 gen_rtvec (2, x
, GEN_INT (TLS_LE32
)),
3925 reg
= force_reg (SImode
, gen_rtx_CONST (SImode
, reg
));
3927 return gen_rtx_PLUS (Pmode
, tp
, reg
);
3934 /* Try machine-dependent ways of modifying an illegitimate address
3935 to be legitimate. If we find one, return the new, valid address. */
3937 arm_legitimize_address (rtx x
, rtx orig_x
, enum machine_mode mode
)
3939 if (arm_tls_symbol_p (x
))
3940 return legitimize_tls_address (x
, NULL_RTX
);
3942 if (GET_CODE (x
) == PLUS
)
3944 rtx xop0
= XEXP (x
, 0);
3945 rtx xop1
= XEXP (x
, 1);
3947 if (CONSTANT_P (xop0
) && !symbol_mentioned_p (xop0
))
3948 xop0
= force_reg (SImode
, xop0
);
3950 if (CONSTANT_P (xop1
) && !symbol_mentioned_p (xop1
))
3951 xop1
= force_reg (SImode
, xop1
);
3953 if (ARM_BASE_REGISTER_RTX_P (xop0
)
3954 && GET_CODE (xop1
) == CONST_INT
)
3956 HOST_WIDE_INT n
, low_n
;
3960 /* VFP addressing modes actually allow greater offsets, but for
3961 now we just stick with the lowest common denominator. */
3963 || ((TARGET_SOFT_FLOAT
|| TARGET_VFP
) && mode
== DFmode
))
3975 low_n
= ((mode
) == TImode
? 0
3976 : n
>= 0 ? (n
& 0xfff) : -((-n
) & 0xfff));
3980 base_reg
= gen_reg_rtx (SImode
);
3981 val
= force_operand (gen_rtx_PLUS (SImode
, xop0
,
3982 GEN_INT (n
)), NULL_RTX
);
3983 emit_move_insn (base_reg
, val
);
3984 x
= (low_n
== 0 ? base_reg
3985 : gen_rtx_PLUS (SImode
, base_reg
, GEN_INT (low_n
)));
3987 else if (xop0
!= XEXP (x
, 0) || xop1
!= XEXP (x
, 1))
3988 x
= gen_rtx_PLUS (SImode
, xop0
, xop1
);
3991 /* XXX We don't allow MINUS any more -- see comment in
3992 arm_legitimate_address_p (). */
3993 else if (GET_CODE (x
) == MINUS
)
3995 rtx xop0
= XEXP (x
, 0);
3996 rtx xop1
= XEXP (x
, 1);
3998 if (CONSTANT_P (xop0
))
3999 xop0
= force_reg (SImode
, xop0
);
4001 if (CONSTANT_P (xop1
) && ! symbol_mentioned_p (xop1
))
4002 xop1
= force_reg (SImode
, xop1
);
4004 if (xop0
!= XEXP (x
, 0) || xop1
!= XEXP (x
, 1))
4005 x
= gen_rtx_MINUS (SImode
, xop0
, xop1
);
  /* Make sure to take full advantage of the pre-indexed addressing mode
     with absolute addresses, which often allows the base register to be
     shared between multiple adjacent memory references, and might even
     allow the mini pool to be avoided entirely.  */
4012 else if (GET_CODE (x
) == CONST_INT
&& optimize
> 0)
4015 HOST_WIDE_INT mask
, base
, index
;
      /* ldr and ldrb can use a 12 bit index; ldrsb and the rest can only
	 use an 8 bit index.  So let's use a 12 bit index for SImode only and
	 hope that arm_gen_constant will enable ldrb to use more bits.  */
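      /* Worked example (added commentary, not from the original sources):
	 for a word load from absolute address 0x00123456 we get
	 bits == 12, base == 0x00123000 and index == 0x456, so the access
	 becomes roughly

	     mov	rB, #0x00123000		@ or a short arm_gen_constant sequence
	     ldr	rD, [rB, #0x456]

	 and neighbouring accesses can reuse rB.  */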
4021 bits
= (mode
== SImode
) ? 12 : 8;
4022 mask
= (1 << bits
) - 1;
4023 base
= INTVAL (x
) & ~mask
;
4024 index
= INTVAL (x
) & mask
;
4025 if (bit_count (base
& 0xffffffff) > (32 - bits
)/2)
4027 /* It'll most probably be more efficient to generate the base
4028 with more bits set and use a negative index instead. */
4032 base_reg
= force_reg (SImode
, GEN_INT (base
));
4033 x
= gen_rtx_PLUS (SImode
, base_reg
, GEN_INT (index
));
4038 /* We need to find and carefully transform any SYMBOL and LABEL
4039 references; so go back to the original address expression. */
4040 rtx new_x
= legitimize_pic_address (orig_x
, mode
, NULL_RTX
);
4042 if (new_x
!= orig_x
)
4050 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4051 to be legitimate. If we find one, return the new, valid address. */
4053 thumb_legitimize_address (rtx x
, rtx orig_x
, enum machine_mode mode
)
4055 if (arm_tls_symbol_p (x
))
4056 return legitimize_tls_address (x
, NULL_RTX
);
4058 if (GET_CODE (x
) == PLUS
4059 && GET_CODE (XEXP (x
, 1)) == CONST_INT
4060 && (INTVAL (XEXP (x
, 1)) >= 32 * GET_MODE_SIZE (mode
)
4061 || INTVAL (XEXP (x
, 1)) < 0))
4063 rtx xop0
= XEXP (x
, 0);
4064 rtx xop1
= XEXP (x
, 1);
4065 HOST_WIDE_INT offset
= INTVAL (xop1
);
4067 /* Try and fold the offset into a biasing of the base register and
4068 then offsetting that. Don't do this when optimizing for space
4069 since it can cause too many CSEs. */
4070 if (optimize_size
&& offset
>= 0
4071 && offset
< 256 + 31 * GET_MODE_SIZE (mode
))
4073 HOST_WIDE_INT delta
;
4076 delta
= offset
- (256 - GET_MODE_SIZE (mode
));
4077 else if (offset
< 32 * GET_MODE_SIZE (mode
) + 8)
4078 delta
= 31 * GET_MODE_SIZE (mode
);
4080 delta
= offset
& (~31 * GET_MODE_SIZE (mode
));
4082 xop0
= force_operand (plus_constant (xop0
, offset
- delta
),
4084 x
= plus_constant (xop0
, delta
);
4086 else if (offset
< 0 && offset
> -256)
	/* Small negative offsets are best done with a subtract before the
	   dereference; forcing these into a register normally takes two
	   insns.  */
4090 x
= force_operand (x
, NULL_RTX
);
4093 /* For the remaining cases, force the constant into a register. */
4094 xop1
= force_reg (SImode
, xop1
);
4095 x
= gen_rtx_PLUS (SImode
, xop0
, xop1
);
4098 else if (GET_CODE (x
) == PLUS
4099 && s_register_operand (XEXP (x
, 1), SImode
)
4100 && !s_register_operand (XEXP (x
, 0), SImode
))
4102 rtx xop0
= force_operand (XEXP (x
, 0), NULL_RTX
);
4104 x
= gen_rtx_PLUS (SImode
, xop0
, XEXP (x
, 1));
4109 /* We need to find and carefully transform any SYMBOL and LABEL
4110 references; so go back to the original address expression. */
4111 rtx new_x
= legitimize_pic_address (orig_x
, mode
, NULL_RTX
);
4113 if (new_x
!= orig_x
)
4121 thumb_legitimize_reload_address (rtx
*x_p
,
4122 enum machine_mode mode
,
4123 int opnum
, int type
,
4124 int ind_levels ATTRIBUTE_UNUSED
)
4128 if (GET_CODE (x
) == PLUS
4129 && GET_MODE_SIZE (mode
) < 4
4130 && REG_P (XEXP (x
, 0))
4131 && XEXP (x
, 0) == stack_pointer_rtx
4132 && GET_CODE (XEXP (x
, 1)) == CONST_INT
4133 && !thumb_legitimate_offset_p (mode
, INTVAL (XEXP (x
, 1))))
4138 push_reload (orig_x
, NULL_RTX
, x_p
, NULL
, MODE_BASE_REG_CLASS (mode
),
4139 Pmode
, VOIDmode
, 0, 0, opnum
, type
);
4143 /* If both registers are hi-regs, then it's better to reload the
4144 entire expression rather than each register individually. That
4145 only requires one reload register rather than two. */
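  /* Illustrative example (not from the original sources): for a Thumb
     address of the form (plus r8 r9), where both operands are high
     registers, reloading the whole sum needs only one reload register,
     whereas reloading r8 and r9 separately would need two.  */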
4146 if (GET_CODE (x
) == PLUS
4147 && REG_P (XEXP (x
, 0))
4148 && REG_P (XEXP (x
, 1))
4149 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x
, 0), mode
)
4150 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x
, 1), mode
))
4155 push_reload (orig_x
, NULL_RTX
, x_p
, NULL
, MODE_BASE_REG_CLASS (mode
),
4156 Pmode
, VOIDmode
, 0, 0, opnum
, type
);
/* Test for various thread-local symbols.  */

/* Return TRUE if X is a thread-local symbol.  */

static bool
arm_tls_symbol_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  if (GET_CODE (x) != SYMBOL_REF)
    return false;

  return SYMBOL_REF_TLS_MODEL (x) != 0;
}

/* Helper for arm_tls_referenced_p.  */

static int
arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  if (GET_CODE (*x) == SYMBOL_REF)
    return SYMBOL_REF_TLS_MODEL (*x) != 0;

  /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
     TLS offsets, not real symbol references.  */
  if (GET_CODE (*x) == UNSPEC
      && XINT (*x, 1) == UNSPEC_TLS)
    return -1;

  return 0;
}

/* Return TRUE if X contains any TLS symbol references.  */

bool
arm_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
}
#define REG_OR_SUBREG_REG(X)                                            \
  (GET_CODE (X) == REG                                                  \
   || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))

#define REG_OR_SUBREG_RTX(X)                    \
   (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))

#ifndef COSTS_N_INSNS
#define COSTS_N_INSNS(N) ((N) * 4 - 2)
#endif
4218 thumb_rtx_costs (rtx x
, enum rtx_code code
, enum rtx_code outer
)
4220 enum machine_mode mode
= GET_MODE (x
);
4233 return COSTS_N_INSNS (1);
4236 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4239 unsigned HOST_WIDE_INT i
= INTVAL (XEXP (x
, 1));
4246 return COSTS_N_INSNS (2) + cycles
;
4248 return COSTS_N_INSNS (1) + 16;
4251 return (COSTS_N_INSNS (1)
4252 + 4 * ((GET_CODE (SET_SRC (x
)) == MEM
)
4253 + GET_CODE (SET_DEST (x
)) == MEM
));
4258 if ((unsigned HOST_WIDE_INT
) INTVAL (x
) < 256)
4260 if (thumb_shiftable_const (INTVAL (x
)))
4261 return COSTS_N_INSNS (2);
4262 return COSTS_N_INSNS (3);
4264 else if ((outer
== PLUS
|| outer
== COMPARE
)
4265 && INTVAL (x
) < 256 && INTVAL (x
) > -256)
4267 else if (outer
== AND
4268 && INTVAL (x
) < 256 && INTVAL (x
) >= -256)
4269 return COSTS_N_INSNS (1);
4270 else if (outer
== ASHIFT
|| outer
== ASHIFTRT
4271 || outer
== LSHIFTRT
)
4273 return COSTS_N_INSNS (2);
4279 return COSTS_N_INSNS (3);
4297 /* XXX another guess. */
4298 /* Memory costs quite a lot for the first word, but subsequent words
4299 load at the equivalent of a single insn each. */
4300 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
4301 + ((GET_CODE (x
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (x
))
4306 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
4311 /* XXX still guessing. */
4312 switch (GET_MODE (XEXP (x
, 0)))
4315 return (1 + (mode
== DImode
? 4 : 0)
4316 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4319 return (4 + (mode
== DImode
? 4 : 0)
4320 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4323 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4335 /* Worker routine for arm_rtx_costs. */
4337 arm_rtx_costs_1 (rtx x
, enum rtx_code code
, enum rtx_code outer
)
4339 enum machine_mode mode
= GET_MODE (x
);
4340 enum rtx_code subcode
;
4346 /* Memory costs quite a lot for the first word, but subsequent words
4347 load at the equivalent of a single insn each. */
4348 return (10 + 4 * ((GET_MODE_SIZE (mode
) - 1) / UNITS_PER_WORD
)
4349 + (GET_CODE (x
) == SYMBOL_REF
4350 && CONSTANT_POOL_ADDRESS_P (x
) ? 4 : 0));
4356 return optimize_size
? COSTS_N_INSNS (2) : 100;
4359 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
4366 case ASHIFT
: case LSHIFTRT
: case ASHIFTRT
:
4368 return (8 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : 8)
4369 + ((GET_CODE (XEXP (x
, 0)) == REG
4370 || (GET_CODE (XEXP (x
, 0)) == SUBREG
4371 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
4373 return (1 + ((GET_CODE (XEXP (x
, 0)) == REG
4374 || (GET_CODE (XEXP (x
, 0)) == SUBREG
4375 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == REG
))
4377 + ((GET_CODE (XEXP (x
, 1)) == REG
4378 || (GET_CODE (XEXP (x
, 1)) == SUBREG
4379 && GET_CODE (SUBREG_REG (XEXP (x
, 1))) == REG
)
4380 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
))
4385 return (4 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 8)
4386 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
4387 || (GET_CODE (XEXP (x
, 0)) == CONST_INT
4388 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))))
4391 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4392 return (2 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4393 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
4394 && arm_const_double_rtx (XEXP (x
, 1))))
4396 + ((REG_OR_SUBREG_REG (XEXP (x
, 0))
4397 || (GET_CODE (XEXP (x
, 0)) == CONST_DOUBLE
4398 && arm_const_double_rtx (XEXP (x
, 0))))
4401 if (((GET_CODE (XEXP (x
, 0)) == CONST_INT
4402 && const_ok_for_arm (INTVAL (XEXP (x
, 0)))
4403 && REG_OR_SUBREG_REG (XEXP (x
, 1))))
4404 || (((subcode
= GET_CODE (XEXP (x
, 1))) == ASHIFT
4405 || subcode
== ASHIFTRT
|| subcode
== LSHIFTRT
4406 || subcode
== ROTATE
|| subcode
== ROTATERT
4408 && GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
4409 && ((INTVAL (XEXP (XEXP (x
, 1), 1)) &
4410 (INTVAL (XEXP (XEXP (x
, 1), 1)) - 1)) == 0)))
4411 && REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 0))
4412 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 1), 1))
4413 || GET_CODE (XEXP (XEXP (x
, 1), 1)) == CONST_INT
)
4414 && REG_OR_SUBREG_REG (XEXP (x
, 0))))
4419 if (GET_CODE (XEXP (x
, 0)) == MULT
)
4421 extra_cost
= rtx_cost (XEXP (x
, 0), code
);
4422 if (!REG_OR_SUBREG_REG (XEXP (x
, 1)))
4423 extra_cost
+= 4 * ARM_NUM_REGS (mode
);
4427 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4428 return (2 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
4429 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4430 || (GET_CODE (XEXP (x
, 1)) == CONST_DOUBLE
4431 && arm_const_double_rtx (XEXP (x
, 1))))
4435 case AND
: case XOR
: case IOR
:
      /* Normally the frame registers will be spilt into reg+const during
         reload, so it is a bad idea to combine them with other instructions,
         since then they might not be moved outside of loops.  As a compromise
         we allow integration with ops that have a constant as their second
         operand.  */
4443 if ((REG_OR_SUBREG_REG (XEXP (x
, 0))
4444 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))
4445 && GET_CODE (XEXP (x
, 1)) != CONST_INT
)
4446 || (REG_OR_SUBREG_REG (XEXP (x
, 0))
4447 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x
, 0)))))
4451 return (4 + extra_cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 8)
4452 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4453 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
4454 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
)))
4457 if (REG_OR_SUBREG_REG (XEXP (x
, 0)))
4458 return (1 + (GET_CODE (XEXP (x
, 1)) == CONST_INT
? 0 : extra_cost
)
4459 + ((REG_OR_SUBREG_REG (XEXP (x
, 1))
4460 || (GET_CODE (XEXP (x
, 1)) == CONST_INT
4461 && const_ok_for_op (INTVAL (XEXP (x
, 1)), code
)))
4464 else if (REG_OR_SUBREG_REG (XEXP (x
, 1)))
4465 return (1 + extra_cost
4466 + ((((subcode
= GET_CODE (XEXP (x
, 0))) == ASHIFT
4467 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
4468 || subcode
== ROTATE
|| subcode
== ROTATERT
4470 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
4471 && ((INTVAL (XEXP (XEXP (x
, 0), 1)) &
4472 (INTVAL (XEXP (XEXP (x
, 0), 1)) - 1)) == 0)))
4473 && (REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 0)))
4474 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x
, 0), 1)))
4475 || GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
))
4481 /* This should have been handled by the CPU specific routines. */
4485 if (arm_arch3m
&& mode
== SImode
4486 && GET_CODE (XEXP (x
, 0)) == LSHIFTRT
4487 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == MULT
4488 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0))
4489 == GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 1)))
4490 && (GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == ZERO_EXTEND
4491 || GET_CODE (XEXP (XEXP (XEXP (x
, 0), 0), 0)) == SIGN_EXTEND
))
4496 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4497 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 6);
4501 return 4 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
4503 return 1 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4);
4506 if (GET_CODE (XEXP (x
, 1)) == PC
|| GET_CODE (XEXP (x
, 2)) == PC
)
4514 return 4 + (mode
== DImode
? 4 : 0);
4517 if (GET_MODE (XEXP (x
, 0)) == QImode
)
4518 return (4 + (mode
== DImode
? 4 : 0)
4519 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4522 switch (GET_MODE (XEXP (x
, 0)))
4525 return (1 + (mode
== DImode
? 4 : 0)
4526 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4529 return (4 + (mode
== DImode
? 4 : 0)
4530 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4533 return (1 + (GET_CODE (XEXP (x
, 0)) == MEM
? 10 : 0));
4548 if (const_ok_for_arm (INTVAL (x
)))
4549 return outer
== SET
? 2 : -1;
4550 else if (outer
== AND
4551 && const_ok_for_arm (~INTVAL (x
)))
4553 else if ((outer
== COMPARE
4554 || outer
== PLUS
|| outer
== MINUS
)
4555 && const_ok_for_arm (-INTVAL (x
)))
4566 if (arm_const_double_rtx (x
))
4567 return outer
== SET
? 2 : -1;
4568 else if ((outer
== COMPARE
|| outer
== PLUS
)
4569 && neg_const_double_rtx_ok_for_fpa (x
))
4578 /* RTX costs when optimizing for size. */
4580 arm_size_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4582 enum machine_mode mode
= GET_MODE (x
);
4586 /* XXX TBD. For now, use the standard costs. */
4587 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4594 /* A memory access costs 1 insn if the mode is small, or the address is
4595 a single register, otherwise it costs one insn per word. */
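      /* Illustrative example (not from the original sources): an SImode
         load from (mem (reg r3)) counts as a single insn, whereas a
         DImode access through a more complex address is charged one insn
         per word transferred.  */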
4596 if (REG_P (XEXP (x
, 0)))
4597 *total
= COSTS_N_INSNS (1);
4599 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4606 /* Needs a libcall, so it costs about this. */
4607 *total
= COSTS_N_INSNS (2);
4611 if (mode
== SImode
&& GET_CODE (XEXP (x
, 1)) == REG
)
4613 *total
= COSTS_N_INSNS (2) + rtx_cost (XEXP (x
, 0), code
);
4621 if (mode
== DImode
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4623 *total
= COSTS_N_INSNS (3) + rtx_cost (XEXP (x
, 0), code
);
4626 else if (mode
== SImode
)
4628 *total
= COSTS_N_INSNS (1) + rtx_cost (XEXP (x
, 0), code
);
4629 /* Slightly disparage register shifts, but not by much. */
4630 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
4631 *total
+= 1 + rtx_cost (XEXP (x
, 1), code
);
4635 /* Needs a libcall. */
4636 *total
= COSTS_N_INSNS (2);
4640 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4642 *total
= COSTS_N_INSNS (1);
4648 enum rtx_code subcode0
= GET_CODE (XEXP (x
, 0));
4649 enum rtx_code subcode1
= GET_CODE (XEXP (x
, 1));
4651 if (subcode0
== ROTATE
|| subcode0
== ROTATERT
|| subcode0
== ASHIFT
4652 || subcode0
== LSHIFTRT
|| subcode0
== ASHIFTRT
4653 || subcode1
== ROTATE
|| subcode1
== ROTATERT
4654 || subcode1
== ASHIFT
|| subcode1
== LSHIFTRT
4655 || subcode1
== ASHIFTRT
)
4657 /* It's just the cost of the two operands. */
4662 *total
= COSTS_N_INSNS (1);
4666 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4670 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4672 *total
= COSTS_N_INSNS (1);
4677 case AND
: case XOR
: case IOR
:
4680 enum rtx_code subcode
= GET_CODE (XEXP (x
, 0));
4682 if (subcode
== ROTATE
|| subcode
== ROTATERT
|| subcode
== ASHIFT
4683 || subcode
== LSHIFTRT
|| subcode
== ASHIFTRT
4684 || (code
== AND
&& subcode
== NOT
))
4686 /* It's just the cost of the two operands. */
4692 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4696 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4700 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4701 *total
= COSTS_N_INSNS (1);
4704 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
4713 if (cc_register (XEXP (x
, 0), VOIDmode
))
4716 *total
= COSTS_N_INSNS (1);
4720 if (TARGET_HARD_FLOAT
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
4721 *total
= COSTS_N_INSNS (1);
4723 *total
= COSTS_N_INSNS (1 + ARM_NUM_REGS (mode
));
4728 if (GET_MODE_SIZE (GET_MODE (XEXP (x
, 0))) < 4)
4730 if (!(arm_arch4
&& MEM_P (XEXP (x
, 0))))
4731 *total
+= COSTS_N_INSNS (arm_arch6
? 1 : 2);
4734 *total
+= COSTS_N_INSNS (1);
4739 if (!(arm_arch4
&& MEM_P (XEXP (x
, 0))))
4741 switch (GET_MODE (XEXP (x
, 0)))
4744 *total
+= COSTS_N_INSNS (1);
4748 *total
+= COSTS_N_INSNS (arm_arch6
? 1 : 2);
4754 *total
+= COSTS_N_INSNS (2);
4759 *total
+= COSTS_N_INSNS (1);
4764 if (const_ok_for_arm (INTVAL (x
)))
4765 *total
= COSTS_N_INSNS (outer_code
== SET
? 1 : 0);
4766 else if (const_ok_for_arm (~INTVAL (x
)))
4767 *total
= COSTS_N_INSNS (outer_code
== AND
? 0 : 1);
4768 else if (const_ok_for_arm (-INTVAL (x
)))
4770 if (outer_code
== COMPARE
|| outer_code
== PLUS
4771 || outer_code
== MINUS
)
4774 *total
= COSTS_N_INSNS (1);
4777 *total
= COSTS_N_INSNS (2);
4783 *total
= COSTS_N_INSNS (2);
4787 *total
= COSTS_N_INSNS (4);
4791 if (mode
!= VOIDmode
)
4792 *total
= COSTS_N_INSNS (ARM_NUM_REGS (mode
));
        *total = COSTS_N_INSNS (4); /* Who knows?  */
4799 /* RTX costs for cores with a slow MUL implementation. */
4802 arm_slowmul_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4804 enum machine_mode mode
= GET_MODE (x
);
4808 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4815 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
4822 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4824 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
4825 & (unsigned HOST_WIDE_INT
) 0xffffffff);
4826 int cost
, const_ok
= const_ok_for_arm (i
);
4827 int j
, booth_unit_size
;
4829 /* Tune as appropriate. */
4830 cost
= const_ok
? 4 : 8;
4831 booth_unit_size
= 2;
4832 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
4834 i
>>= booth_unit_size
;
4842 *total
= 30 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
4843 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4);
4847 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
4853 /* RTX cost for cores with a fast multiply unit (M variants). */
4856 arm_fastmul_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4858 enum machine_mode mode
= GET_MODE (x
);
4862 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4869 /* There is no point basing this on the tuning, since it is always the
4870 fast variant if it exists at all. */
4872 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
4873 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
4874 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
4881 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
4888 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4890 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
4891 & (unsigned HOST_WIDE_INT
) 0xffffffff);
4892 int cost
, const_ok
= const_ok_for_arm (i
);
4893 int j
, booth_unit_size
;
4895 /* Tune as appropriate. */
4896 cost
= const_ok
? 4 : 8;
4897 booth_unit_size
= 8;
4898 for (j
= 0; i
&& j
< 32; j
+= booth_unit_size
)
4900 i
>>= booth_unit_size
;
4908 *total
= 8 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
4909 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4);
4913 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
4919 /* RTX cost for XScale CPUs. */
4922 arm_xscale_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
4924 enum machine_mode mode
= GET_MODE (x
);
4928 *total
= thumb_rtx_costs (x
, code
, outer_code
);
4935 /* There is no point basing this on the tuning, since it is always the
4936 fast variant if it exists at all. */
4938 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
4939 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
4940 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
4947 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
4954 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4956 unsigned HOST_WIDE_INT i
= (INTVAL (XEXP (x
, 1))
4957 & (unsigned HOST_WIDE_INT
) 0xffffffff);
4958 int cost
, const_ok
= const_ok_for_arm (i
);
4959 unsigned HOST_WIDE_INT masked_const
;
4961 /* The cost will be related to two insns.
4962 First a load of the constant (MOV or LDR), then a multiply. */
4965 cost
+= 1; /* LDR is probably more expensive because
4966 of longer result latency. */
4967 masked_const
= i
& 0xffff8000;
4968 if (masked_const
!= 0 && masked_const
!= 0xffff8000)
4970 masked_const
= i
& 0xf8000000;
4971 if (masked_const
== 0 || masked_const
== 0xf8000000)
4980 *total
= 8 + (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : 4)
4981 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : 4);
4985 /* A COMPARE of a MULT is slow on XScale; the muls instruction
4986 will stall until the multiplication is complete. */
4987 if (GET_CODE (XEXP (x
, 0)) == MULT
)
4988 *total
= 4 + rtx_cost (XEXP (x
, 0), code
);
4990 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
4994 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
5000 /* RTX costs for 9e (and later) cores. */
5003 arm_9e_rtx_costs (rtx x
, int code
, int outer_code
, int *total
)
5005 enum machine_mode mode
= GET_MODE (x
);
5014 *total
= COSTS_N_INSNS (3);
5018 *total
= thumb_rtx_costs (x
, code
, outer_code
);
5026 /* There is no point basing this on the tuning, since it is always the
5027 fast variant if it exists at all. */
5029 && (GET_CODE (XEXP (x
, 0)) == GET_CODE (XEXP (x
, 1)))
5030 && (GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
5031 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
))
5038 if (GET_MODE_CLASS (mode
) == MODE_FLOAT
)
5055 *total
= cost
+ (REG_OR_SUBREG_REG (XEXP (x
, 0)) ? 0 : nonreg_cost
)
5056 + (REG_OR_SUBREG_REG (XEXP (x
, 1)) ? 0 : nonreg_cost
);
5060 *total
= arm_rtx_costs_1 (x
, code
, outer_code
);
/* All address computations that can be done are free, but rtx cost returns
   the same for practically all of them.  So we weight the different types
   of address here in the order (most pref first):
   PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL.  */
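/* Illustrative ordering (not from the original sources): an auto-increment
   address such as (post_inc r4) is cheapest, followed by a shifted or
   non-constant sum like (plus r4 (mult r5 4)), then a reg+constant sum,
   then a plain register, with a bare MEM, LABEL_REF or SYMBOL_REF address
   costed least favourably.  */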
5069 arm_arm_address_cost (rtx x
)
5071 enum rtx_code c
= GET_CODE (x
);
5073 if (c
== PRE_INC
|| c
== PRE_DEC
|| c
== POST_INC
|| c
== POST_DEC
)
5075 if (c
== MEM
|| c
== LABEL_REF
|| c
== SYMBOL_REF
)
5078 if (c
== PLUS
|| c
== MINUS
)
5080 if (GET_CODE (XEXP (x
, 0)) == CONST_INT
)
5083 if (ARITHMETIC_P (XEXP (x
, 0)) || ARITHMETIC_P (XEXP (x
, 1)))
5093 arm_thumb_address_cost (rtx x
)
5095 enum rtx_code c
= GET_CODE (x
);
5100 && GET_CODE (XEXP (x
, 0)) == REG
5101 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
5108 arm_address_cost (rtx x
)
5110 return TARGET_ARM
? arm_arm_address_cost (x
) : arm_thumb_address_cost (x
);
5114 arm_adjust_cost (rtx insn
, rtx link
, rtx dep
, int cost
)
5118 /* Some true dependencies can have a higher cost depending
5119 on precisely how certain input operands are used. */
5121 && REG_NOTE_KIND (link
) == 0
5122 && recog_memoized (insn
) >= 0
5123 && recog_memoized (dep
) >= 0)
5125 int shift_opnum
= get_attr_shift (insn
);
5126 enum attr_type attr_type
= get_attr_type (dep
);
5128 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5129 operand for INSN. If we have a shifted input operand and the
5130 instruction we depend on is another ALU instruction, then we may
5131 have to account for an additional stall. */
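      /* Illustrative example (not from the original sources): in
         "add r0, r1, r2, lsl #2" the shifted operand r2 is needed earlier
         in the pipeline than an unshifted operand would be, so if the
         instruction we depend on computes r2 there may be an extra stall
         to account for.  */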
5132 if (shift_opnum
!= 0
5133 && (attr_type
== TYPE_ALU_SHIFT
|| attr_type
== TYPE_ALU_SHIFT_REG
))
5135 rtx shifted_operand
;
5138 /* Get the shifted operand. */
5139 extract_insn (insn
);
5140 shifted_operand
= recog_data
.operand
[shift_opnum
];
          /* Iterate over all the operands in DEP.  If we write an operand
             that overlaps with SHIFTED_OPERAND, then we have to increase
             the cost of this dependency.  */
5146 preprocess_constraints ();
5147 for (opno
= 0; opno
< recog_data
.n_operands
; opno
++)
5149 /* We can ignore strict inputs. */
5150 if (recog_data
.operand_type
[opno
] == OP_IN
)
5153 if (reg_overlap_mentioned_p (recog_data
.operand
[opno
],
5160 /* XXX This is not strictly true for the FPA. */
5161 if (REG_NOTE_KIND (link
) == REG_DEP_ANTI
5162 || REG_NOTE_KIND (link
) == REG_DEP_OUTPUT
)
5165 /* Call insns don't incur a stall, even if they follow a load. */
5166 if (REG_NOTE_KIND (link
) == 0
5167 && GET_CODE (insn
) == CALL_INSN
)
5170 if ((i_pat
= single_set (insn
)) != NULL
5171 && GET_CODE (SET_SRC (i_pat
)) == MEM
5172 && (d_pat
= single_set (dep
)) != NULL
5173 && GET_CODE (SET_DEST (d_pat
)) == MEM
)
5175 rtx src_mem
= XEXP (SET_SRC (i_pat
), 0);
      /* This is a load after a store, there is no conflict if the load reads
         from a cached area.  Assume that loads from the stack, and from the
         constant pool are cached, and that others will miss.  This is a
         hack.  */
5181 if ((GET_CODE (src_mem
) == SYMBOL_REF
&& CONSTANT_POOL_ADDRESS_P (src_mem
))
5182 || reg_mentioned_p (stack_pointer_rtx
, src_mem
)
5183 || reg_mentioned_p (frame_pointer_rtx
, src_mem
)
5184 || reg_mentioned_p (hard_frame_pointer_rtx
, src_mem
))
static int fp_consts_inited = 0;

/* Only zero is valid for VFP.  Other values are also valid for FPA.  */
static const char * const strings_fp[8] =
{
  "0",   "1",   "2",   "3",
  "4",   "5",   "0.5", "10"
};

static REAL_VALUE_TYPE values_fp[8];

static void
init_fp_table (void)
{
  int i;
  REAL_VALUE_TYPE r;

  if (TARGET_VFP)
    fp_consts_inited = 1;
  else
    fp_consts_inited = 8;

  for (i = 0; i < fp_consts_inited; i++)
    {
      r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
      values_fp[i] = r;
    }
}

/* Return TRUE if rtx X is a valid immediate FP constant.  */
int
arm_const_double_rtx (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
  if (REAL_VALUE_MINUS_ZERO (r))
    return 0;

  for (i = 0; i < fp_consts_inited; i++)
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
      return 1;

  return 0;
}

/* Return TRUE if rtx X is a valid immediate FPA constant.  */
int
neg_const_double_rtx_ok_for_fpa (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
  r = REAL_VALUE_NEGATE (r);
  if (REAL_VALUE_MINUS_ZERO (r))
    return 0;

  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
      return 1;

  return 0;
}
5263 /* Predicates for `match_operand' and `match_operator'. */
5265 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5267 cirrus_memory_offset (rtx op
)
5269 /* Reject eliminable registers. */
5270 if (! (reload_in_progress
|| reload_completed
)
5271 && ( reg_mentioned_p (frame_pointer_rtx
, op
)
5272 || reg_mentioned_p (arg_pointer_rtx
, op
)
5273 || reg_mentioned_p (virtual_incoming_args_rtx
, op
)
5274 || reg_mentioned_p (virtual_outgoing_args_rtx
, op
)
5275 || reg_mentioned_p (virtual_stack_dynamic_rtx
, op
)
5276 || reg_mentioned_p (virtual_stack_vars_rtx
, op
)))
5279 if (GET_CODE (op
) == MEM
)
5285 /* Match: (mem (reg)). */
5286 if (GET_CODE (ind
) == REG
)
5292 if (GET_CODE (ind
) == PLUS
5293 && GET_CODE (XEXP (ind
, 0)) == REG
5294 && REG_MODE_OK_FOR_BASE_P (XEXP (ind
, 0), VOIDmode
)
5295 && GET_CODE (XEXP (ind
, 1)) == CONST_INT
)
/* Return TRUE if OP is a valid coprocessor memory address pattern.
   WB is true if writeback address modes are allowed.  */
5306 arm_coproc_mem_operand (rtx op
, bool wb
)
5310 /* Reject eliminable registers. */
5311 if (! (reload_in_progress
|| reload_completed
)
5312 && ( reg_mentioned_p (frame_pointer_rtx
, op
)
5313 || reg_mentioned_p (arg_pointer_rtx
, op
)
5314 || reg_mentioned_p (virtual_incoming_args_rtx
, op
)
5315 || reg_mentioned_p (virtual_outgoing_args_rtx
, op
)
5316 || reg_mentioned_p (virtual_stack_dynamic_rtx
, op
)
5317 || reg_mentioned_p (virtual_stack_vars_rtx
, op
)))
5320 /* Constants are converted into offsets from labels. */
5321 if (GET_CODE (op
) != MEM
)
5326 if (reload_completed
5327 && (GET_CODE (ind
) == LABEL_REF
5328 || (GET_CODE (ind
) == CONST
5329 && GET_CODE (XEXP (ind
, 0)) == PLUS
5330 && GET_CODE (XEXP (XEXP (ind
, 0), 0)) == LABEL_REF
5331 && GET_CODE (XEXP (XEXP (ind
, 0), 1)) == CONST_INT
)))
5334 /* Match: (mem (reg)). */
5335 if (GET_CODE (ind
) == REG
)
5336 return arm_address_register_rtx_p (ind
, 0);
  /* Autoincrement addressing modes.  */
5340 && (GET_CODE (ind
) == PRE_INC
5341 || GET_CODE (ind
) == POST_INC
5342 || GET_CODE (ind
) == PRE_DEC
5343 || GET_CODE (ind
) == POST_DEC
))
5344 return arm_address_register_rtx_p (XEXP (ind
, 0), 0);
5347 && (GET_CODE (ind
) == POST_MODIFY
|| GET_CODE (ind
) == PRE_MODIFY
)
5348 && arm_address_register_rtx_p (XEXP (ind
, 0), 0)
5349 && GET_CODE (XEXP (ind
, 1)) == PLUS
5350 && rtx_equal_p (XEXP (XEXP (ind
, 1), 0), XEXP (ind
, 0)))
5351 ind
= XEXP (ind
, 1);
5356 if (GET_CODE (ind
) == PLUS
5357 && GET_CODE (XEXP (ind
, 0)) == REG
5358 && REG_MODE_OK_FOR_BASE_P (XEXP (ind
, 0), VOIDmode
)
5359 && GET_CODE (XEXP (ind
, 1)) == CONST_INT
5360 && INTVAL (XEXP (ind
, 1)) > -1024
5361 && INTVAL (XEXP (ind
, 1)) < 1024
5362 && (INTVAL (XEXP (ind
, 1)) & 3) == 0)
/* Return true if X is a register that will be eliminated later on.  */
int
arm_eliminable_register (rtx x)
{
  return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
                       || REGNO (x) == ARG_POINTER_REGNUM
                       || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
                           && REGNO (x) <= LAST_VIRTUAL_REGISTER));
}

/* Return GENERAL_REGS if a scratch register is required to reload x to/from
   VFP registers.  Otherwise return NO_REGS.  */

enum reg_class
vfp_secondary_reload_class (enum machine_mode mode, rtx x)
{
  if (arm_coproc_mem_operand (x, FALSE) || s_register_operand (x, mode))
    return NO_REGS;

  return GENERAL_REGS;
}

/* Values which must be returned in the most-significant end of the return
   register.  */

static bool
arm_return_in_msb (tree valtype)
{
  return (TARGET_AAPCS_BASED
          && BYTES_BIG_ENDIAN
          && (AGGREGATE_TYPE_P (valtype)
              || TREE_CODE (valtype) == COMPLEX_TYPE));
}
/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
   Used by the Cirrus Maverick code which has to work around
   a hardware bug triggered by such instructions.  */
static bool
arm_memory_load_p (rtx insn)
{
  rtx body, lhs, rhs;
5410 if (insn
== NULL_RTX
|| GET_CODE (insn
) != INSN
)
5413 body
= PATTERN (insn
);
5415 if (GET_CODE (body
) != SET
)
5418 lhs
= XEXP (body
, 0);
5419 rhs
= XEXP (body
, 1);
5421 lhs
= REG_OR_SUBREG_RTX (lhs
);
5423 /* If the destination is not a general purpose
5424 register we do not have to worry. */
5425 if (GET_CODE (lhs
) != REG
5426 || REGNO_REG_CLASS (REGNO (lhs
)) != GENERAL_REGS
)
5429 /* As well as loads from memory we also have to react
5430 to loads of invalid constants which will be turned
5431 into loads from the minipool. */
5432 return (GET_CODE (rhs
) == MEM
5433 || GET_CODE (rhs
) == SYMBOL_REF
5434 || note_invalid_constants (insn
, -1, false));
5437 /* Return TRUE if INSN is a Cirrus instruction. */
5439 arm_cirrus_insn_p (rtx insn
)
5441 enum attr_cirrus attr
;
5443 /* get_attr cannot accept USE or CLOBBER. */
5445 || GET_CODE (insn
) != INSN
5446 || GET_CODE (PATTERN (insn
)) == USE
5447 || GET_CODE (PATTERN (insn
)) == CLOBBER
)
5450 attr
= get_attr_cirrus (insn
);
5452 return attr
!= CIRRUS_NOT
;
5455 /* Cirrus reorg for invalid instruction combinations. */
5457 cirrus_reorg (rtx first
)
5459 enum attr_cirrus attr
;
5460 rtx body
= PATTERN (first
);
5464 /* Any branch must be followed by 2 non Cirrus instructions. */
5465 if (GET_CODE (first
) == JUMP_INSN
&& GET_CODE (body
) != RETURN
)
5468 t
= next_nonnote_insn (first
);
5470 if (arm_cirrus_insn_p (t
))
5473 if (arm_cirrus_insn_p (next_nonnote_insn (t
)))
5477 emit_insn_after (gen_nop (), first
);
5482 /* (float (blah)) is in parallel with a clobber. */
5483 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0) > 0)
5484 body
= XVECEXP (body
, 0, 0);
5486 if (GET_CODE (body
) == SET
)
5488 rtx lhs
= XEXP (body
, 0), rhs
= XEXP (body
, 1);
5490 /* cfldrd, cfldr64, cfstrd, cfstr64 must
5491 be followed by a non Cirrus insn. */
5492 if (get_attr_cirrus (first
) == CIRRUS_DOUBLE
)
5494 if (arm_cirrus_insn_p (next_nonnote_insn (first
)))
5495 emit_insn_after (gen_nop (), first
);
5499 else if (arm_memory_load_p (first
))
5501 unsigned int arm_regno
;
5503 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5504 ldr/cfmv64hr combination where the Rd field is the same
5505 in both instructions must be split with a non Cirrus
5512 /* Get Arm register number for ldr insn. */
5513 if (GET_CODE (lhs
) == REG
)
5514 arm_regno
= REGNO (lhs
);
5517 gcc_assert (GET_CODE (rhs
) == REG
);
5518 arm_regno
= REGNO (rhs
);
5522 first
= next_nonnote_insn (first
);
5524 if (! arm_cirrus_insn_p (first
))
5527 body
= PATTERN (first
);
5529 /* (float (blah)) is in parallel with a clobber. */
5530 if (GET_CODE (body
) == PARALLEL
&& XVECLEN (body
, 0))
5531 body
= XVECEXP (body
, 0, 0);
5533 if (GET_CODE (body
) == FLOAT
)
5534 body
= XEXP (body
, 0);
5536 if (get_attr_cirrus (first
) == CIRRUS_MOVE
5537 && GET_CODE (XEXP (body
, 1)) == REG
5538 && arm_regno
== REGNO (XEXP (body
, 1)))
5539 emit_insn_after (gen_nop (), first
);
5545 /* get_attr cannot accept USE or CLOBBER. */
5547 || GET_CODE (first
) != INSN
5548 || GET_CODE (PATTERN (first
)) == USE
5549 || GET_CODE (PATTERN (first
)) == CLOBBER
)
5552 attr
= get_attr_cirrus (first
);
5554 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5555 must be followed by a non-coprocessor instruction. */
5556 if (attr
== CIRRUS_COMPARE
)
5560 t
= next_nonnote_insn (first
);
5562 if (arm_cirrus_insn_p (t
))
5565 if (arm_cirrus_insn_p (next_nonnote_insn (t
)))
5569 emit_insn_after (gen_nop (), first
);
5575 /* Return TRUE if X references a SYMBOL_REF. */
5577 symbol_mentioned_p (rtx x
)
5582 if (GET_CODE (x
) == SYMBOL_REF
)
5585 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5586 are constant offsets, not symbols. */
5587 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLS
)
5590 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
5592 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
5598 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
5599 if (symbol_mentioned_p (XVECEXP (x
, i
, j
)))
5602 else if (fmt
[i
] == 'e' && symbol_mentioned_p (XEXP (x
, i
)))
5609 /* Return TRUE if X references a LABEL_REF. */
5611 label_mentioned_p (rtx x
)
5616 if (GET_CODE (x
) == LABEL_REF
)
5619 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
5620 instruction, but they are constant offsets, not symbols. */
5621 if (GET_CODE (x
) == UNSPEC
&& XINT (x
, 1) == UNSPEC_TLS
)
5624 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
5625 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
5631 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
5632 if (label_mentioned_p (XVECEXP (x
, i
, j
)))
5635 else if (fmt
[i
] == 'e' && label_mentioned_p (XEXP (x
, i
)))
5643 tls_mentioned_p (rtx x
)
5645 switch (GET_CODE (x
))
5648 return tls_mentioned_p (XEXP (x
, 0));
5651 if (XINT (x
, 1) == UNSPEC_TLS
)
5659 /* Must not copy a SET whose source operand is PC-relative. */
5662 arm_cannot_copy_insn_p (rtx insn
)
5664 rtx pat
= PATTERN (insn
);
5666 if (GET_CODE (pat
) == PARALLEL
5667 && GET_CODE (XVECEXP (pat
, 0, 0)) == SET
)
5669 rtx rhs
= SET_SRC (XVECEXP (pat
, 0, 0));
5671 if (GET_CODE (rhs
) == UNSPEC
5672 && XINT (rhs
, 1) == UNSPEC_PIC_BASE
)
5675 if (GET_CODE (rhs
) == MEM
5676 && GET_CODE (XEXP (rhs
, 0)) == UNSPEC
5677 && XINT (XEXP (rhs
, 0), 1) == UNSPEC_PIC_BASE
)
5687 enum rtx_code code
= GET_CODE (x
);
5704 /* Return 1 if memory locations are adjacent. */
5706 adjacent_mem_locations (rtx a
, rtx b
)
5708 /* We don't guarantee to preserve the order of these memory refs. */
5709 if (volatile_refs_p (a
) || volatile_refs_p (b
))
5712 if ((GET_CODE (XEXP (a
, 0)) == REG
5713 || (GET_CODE (XEXP (a
, 0)) == PLUS
5714 && GET_CODE (XEXP (XEXP (a
, 0), 1)) == CONST_INT
))
5715 && (GET_CODE (XEXP (b
, 0)) == REG
5716 || (GET_CODE (XEXP (b
, 0)) == PLUS
5717 && GET_CODE (XEXP (XEXP (b
, 0), 1)) == CONST_INT
)))
5719 HOST_WIDE_INT val0
= 0, val1
= 0;
5723 if (GET_CODE (XEXP (a
, 0)) == PLUS
)
5725 reg0
= XEXP (XEXP (a
, 0), 0);
5726 val0
= INTVAL (XEXP (XEXP (a
, 0), 1));
5731 if (GET_CODE (XEXP (b
, 0)) == PLUS
)
5733 reg1
= XEXP (XEXP (b
, 0), 0);
5734 val1
= INTVAL (XEXP (XEXP (b
, 0), 1));
5739 /* Don't accept any offset that will require multiple
5740 instructions to handle, since this would cause the
5741 arith_adjacentmem pattern to output an overlong sequence. */
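      /* Illustrative example (not from the original sources): an offset
         such as 0x10004 cannot be encoded as a single ARM immediate (it
         would need two additions), so a pair of accesses at such offsets
         is rejected here.  */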
5742 if (!const_ok_for_op (PLUS
, val0
) || !const_ok_for_op (PLUS
, val1
))
5745 /* Don't allow an eliminable register: register elimination can make
5746 the offset too large. */
5747 if (arm_eliminable_register (reg0
))
5750 val_diff
= val1
- val0
;
5754 /* If the target has load delay slots, then there's no benefit
5755 to using an ldm instruction unless the offset is zero and
5756 we are optimizing for size. */
5757 return (optimize_size
&& (REGNO (reg0
) == REGNO (reg1
))
5758 && (val0
== 0 || val1
== 0 || val0
== 4 || val1
== 4)
5759 && (val_diff
== 4 || val_diff
== -4));
5762 return ((REGNO (reg0
) == REGNO (reg1
))
5763 && (val_diff
== 4 || val_diff
== -4));
5770 load_multiple_sequence (rtx
*operands
, int nops
, int *regs
, int *base
,
5771 HOST_WIDE_INT
*load_offset
)
5773 int unsorted_regs
[4];
5774 HOST_WIDE_INT unsorted_offsets
[4];
5779 /* Can only handle 2, 3, or 4 insns at present,
5780 though could be easily extended if required. */
5781 gcc_assert (nops
>= 2 && nops
<= 4);
5783 /* Loop over the operands and check that the memory references are
5784 suitable (i.e. immediate offsets from the same base register). At
5785 the same time, extract the target register, and the memory
5787 for (i
= 0; i
< nops
; i
++)
5792 /* Convert a subreg of a mem into the mem itself. */
5793 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
5794 operands
[nops
+ i
] = alter_subreg (operands
+ (nops
+ i
));
5796 gcc_assert (GET_CODE (operands
[nops
+ i
]) == MEM
);
5798 /* Don't reorder volatile memory references; it doesn't seem worth
5799 looking for the case where the order is ok anyway. */
5800 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
5803 offset
= const0_rtx
;
5805 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
5806 || (GET_CODE (reg
) == SUBREG
5807 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
5808 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
5809 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
5811 || (GET_CODE (reg
) == SUBREG
5812 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
5813 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
5818 base_reg
= REGNO (reg
);
5819 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
5820 ? REGNO (operands
[i
])
5821 : REGNO (SUBREG_REG (operands
[i
])));
5826 if (base_reg
!= (int) REGNO (reg
))
5827 /* Not addressed from the same base register. */
5830 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
5831 ? REGNO (operands
[i
])
5832 : REGNO (SUBREG_REG (operands
[i
])));
5833 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
5837 /* If it isn't an integer register, or if it overwrites the
5838 base register but isn't the last insn in the list, then
5839 we can't do this. */
5840 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14
5841 || (i
!= nops
- 1 && unsorted_regs
[i
] == base_reg
))
5844 unsorted_offsets
[i
] = INTVAL (offset
);
5847 /* Not a suitable memory address. */
5851 /* All the useful information has now been extracted from the
5852 operands into unsorted_regs and unsorted_offsets; additionally,
5853 order[0] has been set to the lowest numbered register in the
5854 list. Sort the registers into order, and check that the memory
5855 offsets are ascending and adjacent. */
5857 for (i
= 1; i
< nops
; i
++)
5861 order
[i
] = order
[i
- 1];
5862 for (j
= 0; j
< nops
; j
++)
5863 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
5864 && (order
[i
] == order
[i
- 1]
5865 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
      /* Have we found a suitable register?  If not, one must be used more
         than once.  */
5870 if (order
[i
] == order
[i
- 1])
5873 /* Is the memory address adjacent and ascending? */
5874 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
5882 for (i
= 0; i
< nops
; i
++)
5883 regs
[i
] = unsorted_regs
[order
[i
]];
5885 *load_offset
= unsorted_offsets
[order
[0]];
5888 if (unsorted_offsets
[order
[0]] == 0)
5889 return 1; /* ldmia */
5891 if (unsorted_offsets
[order
[0]] == 4)
5892 return 2; /* ldmib */
5894 if (unsorted_offsets
[order
[nops
- 1]] == 0)
5895 return 3; /* ldmda */
5897 if (unsorted_offsets
[order
[nops
- 1]] == -4)
5898 return 4; /* ldmdb */
  /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
     if the offset isn't small enough.  The reason 2 ldrs are faster
     is because these ARMs are able to do more than one cache access
     in a single cycle.  The ARM9 and StrongARM have Harvard caches,
     whilst the ARM8 has a double bandwidth cache.  This means that
     these cores can do both an instruction fetch and a data fetch in
     a single cycle, so the trick of calculating the address into a
     scratch register (one of the result regs) and then doing a load
     multiple actually becomes slower (and no smaller in code size).
     That is the transformation

         ldr   rd1, [rbase + offset]
         ldr   rd2, [rbase + offset + 4]

     to

         add   rd1, rbase, offset
         ldmia rd1, {rd1, rd2}

     produces worse code -- '3 cycles + any stalls on rd2' instead of
     '2 cycles + any stalls on rd2'.  On ARMs with only one cache
     access per cycle, the first sequence could never complete in less
     than 6 cycles, whereas the ldm sequence would only take 5 and
     would make better use of sequential accesses if not hitting the
     cache.

     We cheat here and test 'arm_ld_sched' which we currently know to
     only be true for the ARM8, ARM9 and StrongARM.  If this ever
     changes, then the test below needs to be reworked.  */
  if (nops == 2 && arm_ld_sched)
    return 0;

  /* Can't do it without setting up the offset, only do this if it takes
     no more than one insn.  */
  return (const_ok_for_arm (unsorted_offsets[order[0]])
          || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
}
5939 emit_ldm_seq (rtx
*operands
, int nops
)
5943 HOST_WIDE_INT offset
;
5947 switch (load_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
5950 strcpy (buf
, "ldm%?ia\t");
5954 strcpy (buf
, "ldm%?ib\t");
5958 strcpy (buf
, "ldm%?da\t");
5962 strcpy (buf
, "ldm%?db\t");
5967 sprintf (buf
, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
5968 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
5971 sprintf (buf
, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX
,
5972 reg_names
[regs
[0]], REGISTER_PREFIX
, reg_names
[base_reg
],
5974 output_asm_insn (buf
, operands
);
5976 strcpy (buf
, "ldm%?ia\t");
5983 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
5984 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
5986 for (i
= 1; i
< nops
; i
++)
5987 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
5988 reg_names
[regs
[i
]]);
5990 strcat (buf
, "}\t%@ phole ldm");
5992 output_asm_insn (buf
, operands
);
5997 store_multiple_sequence (rtx
*operands
, int nops
, int *regs
, int *base
,
5998 HOST_WIDE_INT
* load_offset
)
6000 int unsorted_regs
[4];
6001 HOST_WIDE_INT unsorted_offsets
[4];
6006 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6007 extended if required. */
6008 gcc_assert (nops
>= 2 && nops
<= 4);
6010 /* Loop over the operands and check that the memory references are
6011 suitable (i.e. immediate offsets from the same base register). At
6012 the same time, extract the target register, and the memory
6014 for (i
= 0; i
< nops
; i
++)
6019 /* Convert a subreg of a mem into the mem itself. */
6020 if (GET_CODE (operands
[nops
+ i
]) == SUBREG
)
6021 operands
[nops
+ i
] = alter_subreg (operands
+ (nops
+ i
));
6023 gcc_assert (GET_CODE (operands
[nops
+ i
]) == MEM
);
6025 /* Don't reorder volatile memory references; it doesn't seem worth
6026 looking for the case where the order is ok anyway. */
6027 if (MEM_VOLATILE_P (operands
[nops
+ i
]))
6030 offset
= const0_rtx
;
6032 if ((GET_CODE (reg
= XEXP (operands
[nops
+ i
], 0)) == REG
6033 || (GET_CODE (reg
) == SUBREG
6034 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
6035 || (GET_CODE (XEXP (operands
[nops
+ i
], 0)) == PLUS
6036 && ((GET_CODE (reg
= XEXP (XEXP (operands
[nops
+ i
], 0), 0))
6038 || (GET_CODE (reg
) == SUBREG
6039 && GET_CODE (reg
= SUBREG_REG (reg
)) == REG
))
6040 && (GET_CODE (offset
= XEXP (XEXP (operands
[nops
+ i
], 0), 1))
6045 base_reg
= REGNO (reg
);
6046 unsorted_regs
[0] = (GET_CODE (operands
[i
]) == REG
6047 ? REGNO (operands
[i
])
6048 : REGNO (SUBREG_REG (operands
[i
])));
6053 if (base_reg
!= (int) REGNO (reg
))
6054 /* Not addressed from the same base register. */
6057 unsorted_regs
[i
] = (GET_CODE (operands
[i
]) == REG
6058 ? REGNO (operands
[i
])
6059 : REGNO (SUBREG_REG (operands
[i
])));
6060 if (unsorted_regs
[i
] < unsorted_regs
[order
[0]])
6064 /* If it isn't an integer register, then we can't do this. */
6065 if (unsorted_regs
[i
] < 0 || unsorted_regs
[i
] > 14)
6068 unsorted_offsets
[i
] = INTVAL (offset
);
6071 /* Not a suitable memory address. */
6075 /* All the useful information has now been extracted from the
6076 operands into unsorted_regs and unsorted_offsets; additionally,
6077 order[0] has been set to the lowest numbered register in the
6078 list. Sort the registers into order, and check that the memory
6079 offsets are ascending and adjacent. */
6081 for (i
= 1; i
< nops
; i
++)
6085 order
[i
] = order
[i
- 1];
6086 for (j
= 0; j
< nops
; j
++)
6087 if (unsorted_regs
[j
] > unsorted_regs
[order
[i
- 1]]
6088 && (order
[i
] == order
[i
- 1]
6089 || unsorted_regs
[j
] < unsorted_regs
[order
[i
]]))
      /* Have we found a suitable register?  If not, one must be used more
         than once.  */
6094 if (order
[i
] == order
[i
- 1])
6097 /* Is the memory address adjacent and ascending? */
6098 if (unsorted_offsets
[order
[i
]] != unsorted_offsets
[order
[i
- 1]] + 4)
6106 for (i
= 0; i
< nops
; i
++)
6107 regs
[i
] = unsorted_regs
[order
[i
]];
6109 *load_offset
= unsorted_offsets
[order
[0]];
6112 if (unsorted_offsets
[order
[0]] == 0)
6113 return 1; /* stmia */
6115 if (unsorted_offsets
[order
[0]] == 4)
6116 return 2; /* stmib */
6118 if (unsorted_offsets
[order
[nops
- 1]] == 0)
6119 return 3; /* stmda */
6121 if (unsorted_offsets
[order
[nops
- 1]] == -4)
6122 return 4; /* stmdb */
6128 emit_stm_seq (rtx
*operands
, int nops
)
6132 HOST_WIDE_INT offset
;
6136 switch (store_multiple_sequence (operands
, nops
, regs
, &base_reg
, &offset
))
6139 strcpy (buf
, "stm%?ia\t");
6143 strcpy (buf
, "stm%?ib\t");
6147 strcpy (buf
, "stm%?da\t");
6151 strcpy (buf
, "stm%?db\t");
6158 sprintf (buf
+ strlen (buf
), "%s%s, {%s%s", REGISTER_PREFIX
,
6159 reg_names
[base_reg
], REGISTER_PREFIX
, reg_names
[regs
[0]]);
6161 for (i
= 1; i
< nops
; i
++)
6162 sprintf (buf
+ strlen (buf
), ", %s%s", REGISTER_PREFIX
,
6163 reg_names
[regs
[i
]]);
6165 strcat (buf
, "}\t%@ phole stm");
6167 output_asm_insn (buf
, operands
);
6171 /* Routines for use in generating RTL. */
6174 arm_gen_load_multiple (int base_regno
, int count
, rtx from
, int up
,
6175 int write_back
, rtx basemem
, HOST_WIDE_INT
*offsetp
)
6177 HOST_WIDE_INT offset
= *offsetp
;
6180 int sign
= up
? 1 : -1;
6183 /* XScale has load-store double instructions, but they have stricter
6184 alignment requirements than load-store multiple, so we cannot
6187 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6188 the pipeline until completion.
6196 An ldr instruction takes 1-3 cycles, but does not block the
6205 Best case ldr will always win. However, the more ldr instructions
6206 we issue, the less likely we are to be able to schedule them well.
6207 Using ldr instructions also increases code size.
6209 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6210 for counts of 3 or 4 regs. */
6211 if (arm_tune_xscale
&& count
<= 2 && ! optimize_size
)
6217 for (i
= 0; i
< count
; i
++)
6219 addr
= plus_constant (from
, i
* 4 * sign
);
6220 mem
= adjust_automodify_address (basemem
, SImode
, addr
, offset
);
6221 emit_move_insn (gen_rtx_REG (SImode
, base_regno
+ i
), mem
);
6227 emit_move_insn (from
, plus_constant (from
, count
* 4 * sign
));
6237 result
= gen_rtx_PARALLEL (VOIDmode
,
6238 rtvec_alloc (count
+ (write_back
? 1 : 0)));
6241 XVECEXP (result
, 0, 0)
6242 = gen_rtx_SET (GET_MODE (from
), from
,
6243 plus_constant (from
, count
* 4 * sign
));
6248 for (j
= 0; i
< count
; i
++, j
++)
6250 addr
= plus_constant (from
, j
* 4 * sign
);
6251 mem
= adjust_automodify_address_nv (basemem
, SImode
, addr
, offset
);
6252 XVECEXP (result
, 0, i
)
6253 = gen_rtx_SET (VOIDmode
, gen_rtx_REG (SImode
, base_regno
+ j
), mem
);
6264 arm_gen_store_multiple (int base_regno
, int count
, rtx to
, int up
,
6265 int write_back
, rtx basemem
, HOST_WIDE_INT
*offsetp
)
6267 HOST_WIDE_INT offset
= *offsetp
;
6270 int sign
= up
? 1 : -1;
6273 /* See arm_gen_load_multiple for discussion of
6274 the pros/cons of ldm/stm usage for XScale. */
6275 if (arm_tune_xscale
&& count
<= 2 && ! optimize_size
)
6281 for (i
= 0; i
< count
; i
++)
6283 addr
= plus_constant (to
, i
* 4 * sign
);
6284 mem
= adjust_automodify_address (basemem
, SImode
, addr
, offset
);
6285 emit_move_insn (mem
, gen_rtx_REG (SImode
, base_regno
+ i
));
6291 emit_move_insn (to
, plus_constant (to
, count
* 4 * sign
));
6301 result
= gen_rtx_PARALLEL (VOIDmode
,
6302 rtvec_alloc (count
+ (write_back
? 1 : 0)));
6305 XVECEXP (result
, 0, 0)
6306 = gen_rtx_SET (GET_MODE (to
), to
,
6307 plus_constant (to
, count
* 4 * sign
));
6312 for (j
= 0; i
< count
; i
++, j
++)
6314 addr
= plus_constant (to
, j
* 4 * sign
);
6315 mem
= adjust_automodify_address_nv (basemem
, SImode
, addr
, offset
);
6316 XVECEXP (result
, 0, i
)
6317 = gen_rtx_SET (VOIDmode
, mem
, gen_rtx_REG (SImode
, base_regno
+ j
));
6328 arm_gen_movmemqi (rtx
*operands
)
6330 HOST_WIDE_INT in_words_to_go
, out_words_to_go
, last_bytes
;
6331 HOST_WIDE_INT srcoffset
, dstoffset
;
6333 rtx src
, dst
, srcbase
, dstbase
;
6334 rtx part_bytes_reg
= NULL
;
6337 if (GET_CODE (operands
[2]) != CONST_INT
6338 || GET_CODE (operands
[3]) != CONST_INT
6339 || INTVAL (operands
[2]) > 64
6340 || INTVAL (operands
[3]) & 3)
6343 dstbase
= operands
[0];
6344 srcbase
= operands
[1];
6346 dst
= copy_to_mode_reg (SImode
, XEXP (dstbase
, 0));
6347 src
= copy_to_mode_reg (SImode
, XEXP (srcbase
, 0));
6349 in_words_to_go
= ARM_NUM_INTS (INTVAL (operands
[2]));
6350 out_words_to_go
= INTVAL (operands
[2]) / 4;
6351 last_bytes
= INTVAL (operands
[2]) & 3;
6352 dstoffset
= srcoffset
= 0;
6354 if (out_words_to_go
!= in_words_to_go
&& ((in_words_to_go
- 1) & 3) != 0)
6355 part_bytes_reg
= gen_rtx_REG (SImode
, (in_words_to_go
- 1) & 3);
6357 for (i
= 0; in_words_to_go
>= 2; i
+=4)
6359 if (in_words_to_go
> 4)
6360 emit_insn (arm_gen_load_multiple (0, 4, src
, TRUE
, TRUE
,
6361 srcbase
, &srcoffset
));
6363 emit_insn (arm_gen_load_multiple (0, in_words_to_go
, src
, TRUE
,
6364 FALSE
, srcbase
, &srcoffset
));
6366 if (out_words_to_go
)
6368 if (out_words_to_go
> 4)
6369 emit_insn (arm_gen_store_multiple (0, 4, dst
, TRUE
, TRUE
,
6370 dstbase
, &dstoffset
));
6371 else if (out_words_to_go
!= 1)
6372 emit_insn (arm_gen_store_multiple (0, out_words_to_go
,
6376 dstbase
, &dstoffset
));
6379 mem
= adjust_automodify_address (dstbase
, SImode
, dst
, dstoffset
);
6380 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
6381 if (last_bytes
!= 0)
6383 emit_insn (gen_addsi3 (dst
, dst
, GEN_INT (4)));
6389 in_words_to_go
-= in_words_to_go
< 4 ? in_words_to_go
: 4;
6390 out_words_to_go
-= out_words_to_go
< 4 ? out_words_to_go
: 4;
6393 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6394 if (out_words_to_go
)
6398 mem
= adjust_automodify_address (srcbase
, SImode
, src
, srcoffset
);
6399 sreg
= copy_to_reg (mem
);
6401 mem
= adjust_automodify_address (dstbase
, SImode
, dst
, dstoffset
);
6402 emit_move_insn (mem
, sreg
);
6405 gcc_assert (!in_words_to_go
); /* Sanity check */
6410 gcc_assert (in_words_to_go
> 0);
6412 mem
= adjust_automodify_address (srcbase
, SImode
, src
, srcoffset
);
6413 part_bytes_reg
= copy_to_mode_reg (SImode
, mem
);
6416 gcc_assert (!last_bytes
|| part_bytes_reg
);
6418 if (BYTES_BIG_ENDIAN
&& last_bytes
)
6420 rtx tmp
= gen_reg_rtx (SImode
);
6422 /* The bytes we want are in the top end of the word. */
6423 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
,
6424 GEN_INT (8 * (4 - last_bytes
))));
6425 part_bytes_reg
= tmp
;
6429 mem
= adjust_automodify_address (dstbase
, QImode
,
6430 plus_constant (dst
, last_bytes
- 1),
6431 dstoffset
+ last_bytes
- 1);
6432 emit_move_insn (mem
, gen_lowpart (QImode
, part_bytes_reg
));
6436 tmp
= gen_reg_rtx (SImode
);
6437 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (8)));
6438 part_bytes_reg
= tmp
;
6447 mem
= adjust_automodify_address (dstbase
, HImode
, dst
, dstoffset
);
6448 emit_move_insn (mem
, gen_lowpart (HImode
, part_bytes_reg
));
6452 rtx tmp
= gen_reg_rtx (SImode
);
6453 emit_insn (gen_addsi3 (dst
, dst
, const2_rtx
));
6454 emit_insn (gen_lshrsi3 (tmp
, part_bytes_reg
, GEN_INT (16)));
6455 part_bytes_reg
= tmp
;
6462 mem
= adjust_automodify_address (dstbase
, QImode
, dst
, dstoffset
);
6463 emit_move_insn (mem
, gen_lowpart (QImode
, part_bytes_reg
));
6470 /* Select a dominance comparison mode if possible for a test of the general
6471 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6472 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6473 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6474 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6475 In all cases OP will be either EQ or NE, but we don't need to know which
6476 here. If we are unable to support a dominance comparison we return
6477 CC mode. This will then fail to match for the RTL expressions that
6478 generate this call. */
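/* Illustrative example (not from the original sources): a combined test
   such as (a == b) && (c == d), where both sub-comparisons are EQ, can be
   represented with CC_DEQmode, telling the back end to emit a compare
   followed by a conditional compare rather than two separate branches.  */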
6480 arm_select_dominance_cc_mode (rtx x
, rtx y
, HOST_WIDE_INT cond_or
)
6482 enum rtx_code cond1
, cond2
;
6485 /* Currently we will probably get the wrong result if the individual
6486 comparisons are not simple. This also ensures that it is safe to
6487 reverse a comparison if necessary. */
6488 if ((arm_select_cc_mode (cond1
= GET_CODE (x
), XEXP (x
, 0), XEXP (x
, 1))
6490 || (arm_select_cc_mode (cond2
= GET_CODE (y
), XEXP (y
, 0), XEXP (y
, 1))
6494 /* The if_then_else variant of this tests the second condition if the
6495 first passes, but is true if the first fails. Reverse the first
6496 condition to get a true "inclusive-or" expression. */
6497 if (cond_or
== DOM_CC_NX_OR_Y
)
6498 cond1
= reverse_condition (cond1
);
6500 /* If the comparisons are not equal, and one doesn't dominate the other,
6501 then we can't do this. */
6503 && !comparison_dominates_p (cond1
, cond2
)
6504 && (swapped
= 1, !comparison_dominates_p (cond2
, cond1
)))
6509 enum rtx_code temp
= cond1
;
6517 if (cond_or
== DOM_CC_X_AND_Y
)
6522 case EQ
: return CC_DEQmode
;
6523 case LE
: return CC_DLEmode
;
6524 case LEU
: return CC_DLEUmode
;
6525 case GE
: return CC_DGEmode
;
6526 case GEU
: return CC_DGEUmode
;
6527 default: gcc_unreachable ();
6531 if (cond_or
== DOM_CC_X_AND_Y
)
6547 if (cond_or
== DOM_CC_X_AND_Y
)
6563 if (cond_or
== DOM_CC_X_AND_Y
)
6579 if (cond_or
== DOM_CC_X_AND_Y
)
6594 /* The remaining cases only occur when both comparisons are the
6597 gcc_assert (cond1
== cond2
);
6601 gcc_assert (cond1
== cond2
);
6605 gcc_assert (cond1
== cond2
);
6609 gcc_assert (cond1
== cond2
);
6613 gcc_assert (cond1
== cond2
);
6622 arm_select_cc_mode (enum rtx_code op
, rtx x
, rtx y
)
6624 /* All floating point compares return CCFP if it is an equality
6625 comparison, and CCFPE otherwise. */
6626 if (GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
6646 if (TARGET_HARD_FLOAT
&& TARGET_MAVERICK
)
6655 /* A compare with a shifted operand. Because of canonicalization, the
6656 comparison will have to be swapped when we emit the assembler. */
6657 if (GET_MODE (y
) == SImode
&& GET_CODE (y
) == REG
6658 && (GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
6659 || GET_CODE (x
) == LSHIFTRT
|| GET_CODE (x
) == ROTATE
6660 || GET_CODE (x
) == ROTATERT
))
6663 /* This operation is performed swapped, but since we only rely on the Z
6664 flag we don't need an additional mode. */
6665 if (GET_MODE (y
) == SImode
&& REG_P (y
)
6666 && GET_CODE (x
) == NEG
6667 && (op
== EQ
|| op
== NE
))
6670 /* This is a special case that is used by combine to allow a
6671 comparison of a shifted byte load to be split into a zero-extend
6672 followed by a comparison of the shifted integer (only valid for
6673 equalities and unsigned inequalities). */
6674 if (GET_MODE (x
) == SImode
6675 && GET_CODE (x
) == ASHIFT
6676 && GET_CODE (XEXP (x
, 1)) == CONST_INT
&& INTVAL (XEXP (x
, 1)) == 24
6677 && GET_CODE (XEXP (x
, 0)) == SUBREG
6678 && GET_CODE (SUBREG_REG (XEXP (x
, 0))) == MEM
6679 && GET_MODE (SUBREG_REG (XEXP (x
, 0))) == QImode
6680 && (op
== EQ
|| op
== NE
6681 || op
== GEU
|| op
== GTU
|| op
== LTU
|| op
== LEU
)
6682 && GET_CODE (y
) == CONST_INT
)
6685 /* A construct for a conditional compare, if the false arm contains
6686 0, then both conditions must be true, otherwise either condition
6687 must be true. Not all conditions are possible, so CCmode is
6688 returned if it can't be done. */
6689 if (GET_CODE (x
) == IF_THEN_ELSE
6690 && (XEXP (x
, 2) == const0_rtx
6691 || XEXP (x
, 2) == const1_rtx
)
6692 && COMPARISON_P (XEXP (x
, 0))
6693 && COMPARISON_P (XEXP (x
, 1)))
6694 return arm_select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
6695 INTVAL (XEXP (x
, 2)));
6697 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
6698 if (GET_CODE (x
) == AND
6699 && COMPARISON_P (XEXP (x
, 0))
6700 && COMPARISON_P (XEXP (x
, 1)))
6701 return arm_select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
6704 if (GET_CODE (x
) == IOR
6705 && COMPARISON_P (XEXP (x
, 0))
6706 && COMPARISON_P (XEXP (x
, 1)))
6707 return arm_select_dominance_cc_mode (XEXP (x
, 0), XEXP (x
, 1),
6710 /* An operation (on Thumb) where we want to test for a single bit.
6711 This is done by shifting that bit up into the top bit of a
6712 scratch register; we can then branch on the sign bit. */
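  /* Illustrative example (not from the original sources): to test bit 3
     of a register, the bit can be shifted into the sign bit with
     "lsl rT, rN, #28" and the result branched on with bmi/bpl.  */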
6714 && GET_MODE (x
) == SImode
6715 && (op
== EQ
|| op
== NE
)
6716 && (GET_CODE (x
) == ZERO_EXTRACT
))
6719 /* An operation that sets the condition codes as a side-effect, the
6720 V flag is not set correctly, so we can only use comparisons where
6721 this doesn't matter. (For LT and GE we can use "mi" and "pl"
6723 if (GET_MODE (x
) == SImode
6725 && (op
== EQ
|| op
== NE
|| op
== LT
|| op
== GE
)
6726 && (GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
6727 || GET_CODE (x
) == AND
|| GET_CODE (x
) == IOR
6728 || GET_CODE (x
) == XOR
|| GET_CODE (x
) == MULT
6729 || GET_CODE (x
) == NOT
|| GET_CODE (x
) == NEG
6730 || GET_CODE (x
) == LSHIFTRT
6731 || GET_CODE (x
) == ASHIFT
|| GET_CODE (x
) == ASHIFTRT
6732 || GET_CODE (x
) == ROTATERT
6733 || (TARGET_ARM
&& GET_CODE (x
) == ZERO_EXTRACT
)))
6736 if (GET_MODE (x
) == QImode
&& (op
== EQ
|| op
== NE
))
6739 if (GET_MODE (x
) == SImode
&& (op
== LTU
|| op
== GEU
)
6740 && GET_CODE (x
) == PLUS
6741 && (rtx_equal_p (XEXP (x
, 0), y
) || rtx_equal_p (XEXP (x
, 1), y
)))
/* X and Y are two things to compare using CODE.  Emit the compare insn and
   return the rtx for register 0 in the proper mode.  FP means this is a
   floating point compare: I don't think that it is needed on the arm.  */
rtx
arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
{
  enum machine_mode mode = SELECT_CC_MODE (code, x, y);
  rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
                          gen_rtx_COMPARE (mode, x, y)));

  return cc_reg;
}

/* Generate a sequence of insns that will generate the correct return
   address mask depending on the physical architecture that the program
   is running on.  */
rtx
arm_gen_return_addr_mask (void)
{
  rtx reg = gen_reg_rtx (Pmode);

  emit_insn (gen_return_addr_mask (reg));

  return reg;
}
void
arm_reload_in_hi (rtx *operands)
{
  rtx ref = operands[1];
  rtx base, scratch;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) == SUBREG)
    {
      offset = SUBREG_BYTE (ref);
      ref = SUBREG_REG (ref);
    }

  if (GET_CODE (ref) == REG)
    {
      /* We have a pseudo which has been spilt onto the stack; there
         are two cases here: the first where there is a simple
         stack-slot replacement and a second where the stack-slot is
         out of range, or is used as a subreg.  */
      if (reg_equiv_mem[REGNO (ref)])
        {
          ref = reg_equiv_mem[REGNO (ref)];
          base = find_replacement (&XEXP (ref, 0));
        }
      else
        /* The slot is out of range, or was dressed up in a SUBREG.  */
        base = reg_equiv_address[REGNO (ref)];
    }
  else
    base = find_replacement (&XEXP (ref, 0));

  /* Handle the case where the address is too complex to be offset by 1.  */
  if (GET_CODE (base) == MINUS
      || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
    {
      rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

      emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
      base = base_plus;
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* The addend must be CONST_INT, or we would have dealt with it above.  */
      HOST_WIDE_INT hi, lo;

      offset += INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);

      /* Rework the address into a legal sequence of insns.  */
      /* Valid range for lo is -4095 -> 4095.  */
      lo = (offset >= 0
            ? (offset & 0xfff)
            : -((-offset) & 0xfff));

      /* Corner case, if lo is the max offset then we would be out of range
         once we have added the additional 1 below, so bump the msb into the
         pre-loading insn(s).  */
      if (lo == 4095)
        lo &= 0x7ff;

      hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
             ^ (HOST_WIDE_INT) 0x80000000)
            - (HOST_WIDE_INT) 0x80000000);

      gcc_assert (hi + lo == offset);
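
      /* Illustrative note (added, not part of the original sources): the
         split above decomposes OFFSET into HI + LO, with LO in the range a
         single load/store offset can encode.  For example, assuming
         offset == 0x12345, we get lo == 0x345 and hi == 0x12000, so the
         base register is first advanced by 0x12000 and the two byte
         accesses then use offsets 0x345 and 0x346.  */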
      if (hi != 0)
        {
          rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

          /* Get the base address; addsi3 knows how to handle constants
             that require more than one insn.  */
          emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
          base = base_plus;
          offset = lo;
        }
    }

  /* Operands[2] may overlap operands[0] (though it won't overlap
     operands[1]), that's why we asked for a DImode reg -- so we can
     use the bit that does not overlap.  */
  if (REGNO (operands[2]) == REGNO (operands[0]))
    scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
  else
    scratch = gen_rtx_REG (SImode, REGNO (operands[2]));

  emit_insn (gen_zero_extendqisi2 (scratch,
                                   gen_rtx_MEM (QImode,
                                                plus_constant (base, offset))));
  emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
                                   gen_rtx_MEM (QImode,
                                                plus_constant (base,
                                                               offset + 1))));
  if (!BYTES_BIG_ENDIAN)
    emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
                            gen_rtx_IOR (SImode,
                                         gen_rtx_ASHIFT
                                         (SImode,
                                          gen_rtx_SUBREG (SImode, operands[0], 0),
                                          GEN_INT (8)),
                                         scratch)));
  else
    emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
                            gen_rtx_IOR (SImode,
                                         gen_rtx_ASHIFT (SImode, scratch,
                                                         GEN_INT (8)),
                                         gen_rtx_SUBREG (SImode, operands[0],
                                                         0))));
}
/* Handle storing a half-word to memory during reload by synthesizing as two
   byte stores.  Take care not to clobber the input values until after we
   have moved them somewhere safe.  This code assumes that if the DImode
   scratch in operands[2] overlaps either the input value or output address
   in some way, then that value must die in this insn (we absolutely need
   two scratch registers for some corner cases).  */
void
arm_reload_out_hi (rtx *operands)
{
  rtx ref = operands[0];
  rtx outval = operands[1];
  rtx base, scratch;
  HOST_WIDE_INT offset = 0;

  if (GET_CODE (ref) == SUBREG)
    {
      offset = SUBREG_BYTE (ref);
      ref = SUBREG_REG (ref);
    }

  if (GET_CODE (ref) == REG)
    {
      /* We have a pseudo which has been spilt onto the stack; there
         are two cases here: the first where there is a simple
         stack-slot replacement and a second where the stack-slot is
         out of range, or is used as a subreg.  */
      if (reg_equiv_mem[REGNO (ref)])
        {
          ref = reg_equiv_mem[REGNO (ref)];
          base = find_replacement (&XEXP (ref, 0));
        }
      else
        /* The slot is out of range, or was dressed up in a SUBREG.  */
        base = reg_equiv_address[REGNO (ref)];
    }
  else
    base = find_replacement (&XEXP (ref, 0));

  scratch = gen_rtx_REG (SImode, REGNO (operands[2]));

  /* Handle the case where the address is too complex to be offset by 1.  */
  if (GET_CODE (base) == MINUS
      || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
    {
      rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

      /* Be careful not to destroy OUTVAL.  */
      if (reg_overlap_mentioned_p (base_plus, outval))
        {
          /* Updating base_plus might destroy outval, see if we can
             swap the scratch and base_plus.  */
          if (!reg_overlap_mentioned_p (scratch, outval))
            {
              rtx tmp = scratch;
              scratch = base_plus;
              base_plus = tmp;
            }
          else
            {
              rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));

              /* Be conservative and copy OUTVAL into the scratch now,
                 this should only be necessary if outval is a subreg
                 of something larger than a word.  */
              /* XXX Might this clobber base?  I can't see how it can,
                 since scratch is known to overlap with OUTVAL, and
                 must be wider than a word.  */
              emit_insn (gen_movhi (scratch_hi, outval));
              outval = scratch_hi;
            }
        }

      emit_insn (gen_rtx_SET (VOIDmode, base_plus, base));
      base = base_plus;
    }
  else if (GET_CODE (base) == PLUS)
    {
      /* The addend must be CONST_INT, or we would have dealt with it above.  */
      HOST_WIDE_INT hi, lo;

      offset += INTVAL (XEXP (base, 1));
      base = XEXP (base, 0);

      /* Rework the address into a legal sequence of insns.  */
      /* Valid range for lo is -4095 -> 4095.  */
      lo = (offset >= 0
            ? (offset & 0xfff)
            : -((-offset) & 0xfff));

      /* Corner case, if lo is the max offset then we would be out of range
         once we have added the additional 1 below, so bump the msb into the
         pre-loading insn(s).  */
      if (lo == 4095)
        lo &= 0x7ff;

      hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
             ^ (HOST_WIDE_INT) 0x80000000)
            - (HOST_WIDE_INT) 0x80000000);

      gcc_assert (hi + lo == offset);

      if (hi != 0)
        {
          rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);

          /* Be careful not to destroy OUTVAL.  */
          if (reg_overlap_mentioned_p (base_plus, outval))
            {
              /* Updating base_plus might destroy outval, see if we
                 can swap the scratch and base_plus.  */
              if (!reg_overlap_mentioned_p (scratch, outval))
                {
                  rtx tmp = scratch;
                  scratch = base_plus;
                  base_plus = tmp;
                }
              else
                {
                  rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));

                  /* Be conservative and copy outval into scratch now,
                     this should only be necessary if outval is a
                     subreg of something larger than a word.  */
                  /* XXX Might this clobber base?  I can't see how it
                     can, since scratch is known to overlap with
                     outval.  */
                  emit_insn (gen_movhi (scratch_hi, outval));
                  outval = scratch_hi;
                }
            }

          /* Get the base address; addsi3 knows how to handle constants
             that require more than one insn.  */
          emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
          base = base_plus;
          offset = lo;
        }
    }

  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_movqi (gen_rtx_MEM (QImode,
                                         plus_constant (base, offset + 1)),
                            gen_lowpart (QImode, outval)));
      emit_insn (gen_lshrsi3 (scratch,
                              gen_rtx_SUBREG (SImode, outval, 0),
                              GEN_INT (8)));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
                            gen_lowpart (QImode, scratch)));
    }
  else
    {
      emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
                            gen_lowpart (QImode, outval)));
      emit_insn (gen_lshrsi3 (scratch,
                              gen_rtx_SUBREG (SImode, outval, 0),
                              GEN_INT (8)));
      emit_insn (gen_movqi (gen_rtx_MEM (QImode,
                                         plus_constant (base, offset + 1)),
                            gen_lowpart (QImode, scratch)));
    }
}
/* Return true if a type must be passed in memory.  For AAPCS, small aggregates
   (padded to the size of a word) should be passed in a register.  */
static bool
arm_must_pass_in_stack (enum machine_mode mode, tree type)
{
  if (TARGET_AAPCS_BASED)
    return must_pass_in_stack_var_size (mode, type);

  return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
   Return true if an argument passed on the stack should be padded upwards,
   i.e. if the least-significant byte has useful data.
   For legacy APCS ABIs we use the default.  For AAPCS based ABIs small
   aggregate types are placed in the lowest memory address.  */
bool
arm_pad_arg_upward (enum machine_mode mode, tree type)
{
  if (!TARGET_AAPCS_BASED)
    return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;

  if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
    return false;

  return true;
}
/* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
   For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
   byte of the register has useful data, and return the opposite if the
   most significant byte does.
   For AAPCS, small aggregates and small complex types are always padded
   upwards.  */
bool
arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
                    tree type, int first ATTRIBUTE_UNUSED)
{
  if (TARGET_AAPCS_BASED
      && BYTES_BIG_ENDIAN
      && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
      && int_size_in_bytes (type) <= 4)
    return true;

  /* Otherwise, use default padding.  */
  return !BYTES_BIG_ENDIAN;
}
/* Print a symbolic form of X to the debug file, F.  */
static void
arm_print_value (FILE *f, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      return;

    case CONST_DOUBLE:
      fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
      return;

    case CONST_VECTOR:
      {
        int i;

        for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
          {
            fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
            if (i < (CONST_VECTOR_NUNITS (x) - 1))
              fputc (',', f);
          }
      }
      return;

    case CONST_STRING:
      fprintf (f, "\"%s\"", XSTR (x, 0));
      return;

    case SYMBOL_REF:
      fprintf (f, "`%s'", XSTR (x, 0));
      return;

    case LABEL_REF:
      fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
      return;

    case CONST:
      arm_print_value (f, XEXP (x, 0));
      return;

    case PLUS:
      arm_print_value (f, XEXP (x, 0));
      arm_print_value (f, XEXP (x, 1));
      return;

    default:
      fprintf (f, "????");
      return;
    }
}
/* Routines for manipulation of the constant pool.  */

/* Arm instructions cannot load a large constant directly into a
   register; they have to come from a pc relative load.  The constant
   must therefore be placed in the addressable range of the pc
   relative load.  Depending on the precise pc relative load
   instruction the range is somewhere between 256 bytes and 4k.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow
   things down and make the code larger.

   Normally we can hide the table after an existing unconditional
   branch so that there is no interruption of the flow, but in the
   worst case the code looks like this:

   We fix this by performing a scan after scheduling, which notices
   which instructions need to have their operands fetched from the
   constant table and builds the table.

   The algorithm starts by building a table of all the constants that
   need fixing up and all the natural barriers in the function (places
   where a constant table can be dropped without breaking the flow).
   For each fixup we note how far the pc-relative replacement will be
   able to reach and the offset of the instruction into the function.

   Having built the table we then group the fixes together to form
   tables that are as large as possible (subject to addressing
   constraints) and emit each table of constants after the last
   barrier that is within range of all the instructions in the group.
   If a group does not contain a barrier, then we forcibly create one
   by inserting a jump instruction into the flow.  Once the table has
   been inserted, the insns are then modified to reference the
   relevant entry in the pool.

   Possible enhancements to the algorithm (not implemented) are:

   1) For some processors and object formats, there may be benefit in
   aligning the pools to the start of cache lines; this alignment
   would need to be taken into account when calculating addressability
   of a pool.  */
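
/* Illustrative sketch (added note, not from the original sources) of the
   pattern this pass produces when it has to force a barrier and branch
   around a dumped pool:

        ldr     rN, Lpool       @ pc-relative load of the constant
        ...
        b       Lafter          @ jump inserted by create_fix_barrier
   Lpool:
        .word   <constant>      @ minipool entries emitted by dump_minipool
   Lafter:
        ...

   When an existing unconditional branch is in range, the pool is hidden
   after it instead and no extra jump is needed.  */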
/* These typedefs are located at the start of this file, so that
   they can be used in the prototypes there.  This comment is to
   remind readers of that fact so that the following structures
   can be understood more easily.

     typedef struct minipool_node    Mnode;
     typedef struct minipool_fixup   Mfix;  */

struct minipool_node
{
  /* Doubly linked chain of entries.  */
  Mnode * next;
  Mnode * prev;
  /* The maximum offset into the code that this entry can be placed.  While
     pushing fixes for forward references, all entries are sorted in order
     of increasing max_address.  */
  HOST_WIDE_INT max_address;
  /* Similarly for an entry inserted for a backwards ref.  */
  HOST_WIDE_INT min_address;
  /* The number of fixes referencing this entry.  This can become zero
     if we "unpush" an entry.  In this case we ignore the entry when we
     come to emit the code.  */
  int refcount;
  /* The offset from the start of the minipool.  */
  HOST_WIDE_INT offset;
  /* The value in table.  */
  rtx value;
  /* The mode of value.  */
  enum machine_mode mode;
  /* The size of the value.  With iWMMXt enabled
     sizes > 4 also imply an alignment of 8-bytes.  */
  int fix_size;
};

struct minipool_fixup
{
  Mfix *            next;
  rtx               insn;
  HOST_WIDE_INT     address;
  rtx *             loc;
  enum machine_mode mode;
  int               fix_size;
  rtx               value;
  Mnode *           minipool;
  HOST_WIDE_INT     forwards;
  HOST_WIDE_INT     backwards;
};

/* Fixes less than a word need padding out to a word boundary.  */
#define MINIPOOL_FIX_SIZE(mode) \
  (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
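
/* Worked example (added note, values follow directly from the macro):
   MINIPOOL_FIX_SIZE (QImode) == 4 and MINIPOOL_FIX_SIZE (HImode) == 4,
   since sub-word constants are padded up to a full word, while
   MINIPOOL_FIX_SIZE (SImode) == 4 and MINIPOOL_FIX_SIZE (DImode) == 8
   simply use the mode size.  */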
static Mnode *  minipool_vector_head;
static Mnode *  minipool_vector_tail;
static rtx      minipool_vector_label;
static int      minipool_pad;

/* The linked list of all minipool fixes required for this function.  */
Mfix *          minipool_fix_head;
Mfix *          minipool_fix_tail;
/* The fix entry for the current minipool, once it has been placed.  */
Mfix *          minipool_barrier;
/* Determines if INSN is the start of a jump table.  Returns the end
   of the TABLE or NULL_RTX.  */
static rtx
is_jump_table (rtx insn)
{
  rtx table;

  if (GET_CODE (insn) == JUMP_INSN
      && JUMP_LABEL (insn) != NULL
      && ((table = next_real_insn (JUMP_LABEL (insn)))
          == next_real_insn (insn))
      && table != NULL
      && GET_CODE (table) == JUMP_INSN
      && (GET_CODE (PATTERN (table)) == ADDR_VEC
          || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
    return table;

  return NULL_RTX;
}

#ifndef JUMP_TABLES_IN_TEXT_SECTION
#define JUMP_TABLES_IN_TEXT_SECTION 0
#endif

static HOST_WIDE_INT
get_jump_table_size (rtx insn)
{
  /* ADDR_VECs only take room if read-only data does into the text
     section.  */
  if (JUMP_TABLES_IN_TEXT_SECTION
#if !defined(READONLY_DATA_SECTION) && !defined(READONLY_DATA_SECTION_ASM_OP)
      || 1
#endif
      )
    {
      rtx body = PATTERN (insn);
      int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;

      return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, elt);
    }

  return 0;
}
/* Move a minipool fix MP from its current location to before MAX_MP.
   If MAX_MP is NULL, then MP doesn't need moving, but the addressing
   constraints may need updating.  */
static Mnode *
move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
                               HOST_WIDE_INT max_address)
{
  /* The code below assumes these are different.  */
  gcc_assert (mp != max_mp);

  if (max_mp == NULL)
    {
      if (max_address < mp->max_address)
        mp->max_address = max_address;
    }
  else
    {
      if (max_address > max_mp->max_address - mp->fix_size)
        mp->max_address = max_mp->max_address - mp->fix_size;
      else
        mp->max_address = max_address;

      /* Unlink MP from its current position.  Since max_mp is non-null,
         mp->prev must be non-null.  */
      mp->prev->next = mp->next;
      if (mp->next != NULL)
        mp->next->prev = mp->prev;
      else
        minipool_vector_tail = mp->prev;

      /* Re-insert it before MAX_MP.  */
      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;

      if (mp->prev != NULL)
        mp->prev->next = mp;
      else
        minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  */
  while (mp->prev != NULL
         && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}
/* Add a constant to the minipool for a forward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.  */
static Mnode *
add_minipool_forward_ref (Mfix *fix)
{
  /* If set, max_mp is the first pool_entry that has a lower
     constraint than the one we are trying to add.  */
  Mnode *       max_mp = NULL;
  HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
  Mnode *       mp;

  /* If this fix's address is greater than the address of the first
     entry, then we can't put the fix in this pool.  We subtract the
     size of the current fix to ensure that if the table is fully
     packed we still have enough room to insert this value by shuffling
     the other fixes forwards.  */
  if (minipool_vector_head &&
      fix->address >= minipool_vector_head->max_address - fix->fix_size)
    return NULL;

  /* Scan the pool to see if a constant with the same value has
     already been added.  While we are doing this, also note the
     location where we must insert the constant if it doesn't already
     exist.  */
  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      if (GET_CODE (fix->value) == GET_CODE (mp->value)
          && fix->mode == mp->mode
          && (GET_CODE (fix->value) != CODE_LABEL
              || (CODE_LABEL_NUMBER (fix->value)
                  == CODE_LABEL_NUMBER (mp->value)))
          && rtx_equal_p (fix->value, mp->value))
        {
          /* More than one fix references this entry.  */
          mp->refcount++;
          return move_minipool_fix_forward_ref (mp, max_mp, max_address);
        }

      /* Note the insertion point if necessary.  */
      if (max_mp == NULL
          && mp->max_address > max_address)
        max_mp = mp;

      /* If we are inserting an 8-bytes aligned quantity and
         we have not already found an insertion point, then
         make sure that all such 8-byte aligned quantities are
         placed at the start of the pool.  */
      if (ARM_DOUBLEWORD_ALIGN
          && max_mp == NULL
          && fix->fix_size == 8
          && mp->fix_size != 8)
        {
          max_mp = mp;
          max_address = mp->max_address;
        }
    }

  /* The value is not currently in the minipool, so we need to create
     a new entry for it.  If MAX_MP is NULL, the entry will be put on
     the end of the list since the placement is less constrained than
     any existing entry.  Otherwise, we insert the new fix before
     MAX_MP and, if necessary, adjust the constraints on the other
     entries.  */
  mp = xmalloc (sizeof (* mp));
  mp->fix_size = fix->fix_size;
  mp->mode = fix->mode;
  mp->value = fix->value;
  mp->refcount = 1;
  /* Not yet required for a backwards ref.  */
  mp->min_address = -65536;

  if (max_mp == NULL)
    {
      mp->max_address = max_address;
      mp->next = NULL;
      mp->prev = minipool_vector_tail;

      if (mp->prev == NULL)
        {
          minipool_vector_head = mp;
          minipool_vector_label = gen_label_rtx ();
        }
      else
        mp->prev->next = mp;

      minipool_vector_tail = mp;
    }
  else
    {
      if (max_address > max_mp->max_address - mp->fix_size)
        mp->max_address = max_mp->max_address - mp->fix_size;
      else
        mp->max_address = max_address;

      mp->next = max_mp;
      mp->prev = max_mp->prev;
      max_mp->prev = mp;
      if (mp->prev != NULL)
        mp->prev->next = mp;
      else
        minipool_vector_head = mp;
    }

  /* Save the new entry.  */
  max_mp = mp;

  /* Scan over the preceding entries and adjust their addresses as
     required.  */
  while (mp->prev != NULL
         && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
    {
      mp->prev->max_address = mp->max_address - mp->prev->fix_size;
      mp = mp->prev;
    }

  return max_mp;
}
static Mnode *
move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
                                HOST_WIDE_INT min_address)
{
  HOST_WIDE_INT offset;

  /* The code below assumes these are different.  */
  gcc_assert (mp != min_mp);

  if (min_mp == NULL)
    {
      if (min_address > mp->min_address)
        mp->min_address = min_address;
    }
  else
    {
      /* We will adjust this below if it is too loose.  */
      mp->min_address = min_address;

      /* Unlink MP from its current position.  Since min_mp is non-null,
         mp->next must be non-null.  */
      mp->next->prev = mp->prev;
      if (mp->prev != NULL)
        mp->prev->next = mp->next;
      else
        minipool_vector_head = mp->next;

      /* Reinsert it after MIN_MP.  */
      mp->prev = min_mp;
      mp->next = min_mp->next;
      min_mp->next = mp;
      if (mp->next != NULL)
        mp->next->prev = mp;
      else
        minipool_vector_tail = mp;
    }

  min_mp = mp;

  offset = 0;
  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      mp->offset = offset;
      if (mp->refcount > 0)
        offset += mp->fix_size;

      if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
        mp->next->min_address = mp->min_address + mp->fix_size;
    }

  return min_mp;
}
/* Add a constant to the minipool for a backward reference.  Returns the
   node added or NULL if the constant will not fit in this pool.

   Note that the code for insertion for a backwards reference can be
   somewhat confusing because the calculated offsets for each fix do
   not take into account the size of the pool (which is still under
   construction).  */
static Mnode *
add_minipool_backward_ref (Mfix *fix)
{
  /* If set, min_mp is the last pool_entry that has a lower constraint
     than the one we are trying to add.  */
  Mnode *min_mp = NULL;
  /* This can be negative, since it is only a constraint.  */
  HOST_WIDE_INT min_address = fix->address - fix->backwards;
  Mnode *mp;

  /* If we can't reach the current pool from this insn, or if we can't
     insert this entry at the end of the pool without pushing other
     fixes out of range, then we don't try.  This ensures that we
     can't fail later on.  */
  if (min_address >= minipool_barrier->address
      || (minipool_vector_tail->min_address + fix->fix_size
          >= minipool_barrier->address))
    return NULL;

  /* Scan the pool to see if a constant with the same value has
     already been added.  While we are doing this, also note the
     location where we must insert the constant if it doesn't already
     exist.  */
  for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
    {
      if (GET_CODE (fix->value) == GET_CODE (mp->value)
          && fix->mode == mp->mode
          && (GET_CODE (fix->value) != CODE_LABEL
              || (CODE_LABEL_NUMBER (fix->value)
                  == CODE_LABEL_NUMBER (mp->value)))
          && rtx_equal_p (fix->value, mp->value)
          /* Check that there is enough slack to move this entry to the
             end of the table (this is conservative).  */
          && (mp->max_address
              > (minipool_barrier->address
                 + minipool_vector_tail->offset
                 + minipool_vector_tail->fix_size)))
        {
          mp->refcount++;
          return move_minipool_fix_backward_ref (mp, min_mp, min_address);
        }

      if (min_mp != NULL)
        mp->min_address += fix->fix_size;
      else
        {
          /* Note the insertion point if necessary.  */
          if (mp->min_address < min_address)
            {
              /* For now, we do not allow the insertion of 8-byte alignment
                 requiring nodes anywhere but at the start of the pool.  */
              if (ARM_DOUBLEWORD_ALIGN
                  && fix->fix_size == 8 && mp->fix_size != 8)
                return NULL;
              else
                min_mp = mp;
            }
          else if (mp->max_address
                   < minipool_barrier->address + mp->offset + fix->fix_size)
            {
              /* Inserting before this entry would push the fix beyond
                 its maximum address (which can happen if we have
                 re-located a forwards fix); force the new fix to come
                 after it.  */
              min_mp = mp;
              min_address = mp->min_address + fix->fix_size;
            }
          /* If we are inserting an 8-bytes aligned quantity and
             we have not already found an insertion point, then
             make sure that all such 8-byte aligned quantities are
             placed at the start of the pool.  */
          else if (ARM_DOUBLEWORD_ALIGN
                   && min_mp == NULL
                   && fix->fix_size == 8
                   && mp->fix_size < 8)
            {
              min_mp = mp;
              min_address = mp->min_address + fix->fix_size;
            }
        }
    }

  /* We need to create a new entry.  */
  mp = xmalloc (sizeof (* mp));
  mp->fix_size = fix->fix_size;
  mp->mode = fix->mode;
  mp->value = fix->value;
  mp->refcount = 1;
  mp->max_address = minipool_barrier->address + 65536;

  mp->min_address = min_address;

  if (min_mp == NULL)
    {
      mp->prev = NULL;
      mp->next = minipool_vector_head;

      if (mp->next == NULL)
        {
          minipool_vector_tail = mp;
          minipool_vector_label = gen_label_rtx ();
        }
      else
        mp->next->prev = mp;

      minipool_vector_head = mp;
    }
  else
    {
      mp->next = min_mp->next;
      mp->prev = min_mp;
      min_mp->next = mp;

      if (mp->next != NULL)
        mp->next->prev = mp;
      else
        minipool_vector_tail = mp;
    }

  /* Save the new entry.  */
  min_mp = mp;

  if (mp->prev)
    mp = mp->prev;
  else
    mp->offset = 0;

  /* Scan over the following entries and adjust their offsets.  */
  while (mp->next != NULL)
    {
      if (mp->next->min_address < mp->min_address + mp->fix_size)
        mp->next->min_address = mp->min_address + mp->fix_size;

      if (mp->refcount)
        mp->next->offset = mp->offset + mp->fix_size;
      else
        mp->next->offset = mp->offset;

      mp = mp->next;
    }

  return min_mp;
}
static void
assign_minipool_offsets (Mfix *barrier)
{
  HOST_WIDE_INT offset = 0;
  Mnode *mp;

  minipool_barrier = barrier;

  for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
    {
      mp->offset = offset;

      if (mp->refcount > 0)
        offset += mp->fix_size;
    }
}
/* Output the literal table.  */
static void
dump_minipool (rtx scan)
{
  Mnode * mp;
  Mnode * nmp;
  int align64 = 0;

  if (ARM_DOUBLEWORD_ALIGN)
    for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
      if (mp->refcount > 0 && mp->fix_size == 8)
        {
          align64 = 1;
          break;
        }

  if (dump_file)
    fprintf (dump_file,
             ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
             INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);

  scan = emit_label_after (gen_label_rtx (), scan);
  scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
  scan = emit_label_after (minipool_vector_label, scan);

  for (mp = minipool_vector_head; mp != NULL; mp = nmp)
    {
      if (mp->refcount > 0)
        {
          if (dump_file)
            {
              fprintf (dump_file,
                       ";;  Offset %u, min %ld, max %ld ",
                       (unsigned) mp->offset, (unsigned long) mp->min_address,
                       (unsigned long) mp->max_address);
              arm_print_value (dump_file, mp->value);
              fputc ('\n', dump_file);
            }

          switch (mp->fix_size)
            {
#ifdef HAVE_consttable_1
            case 1:
              scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
              break;
#endif
#ifdef HAVE_consttable_2
            case 2:
              scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
              break;
#endif
#ifdef HAVE_consttable_4
            case 4:
              scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
              break;
#endif
#ifdef HAVE_consttable_8
            case 8:
              scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
              break;
#endif
            default:
              gcc_unreachable ();
            }
        }

      nmp = mp->next;
      free (mp);
    }

  minipool_vector_head = minipool_vector_tail = NULL;
  scan = emit_insn_after (gen_consttable_end (), scan);
  scan = emit_barrier_after (scan);
}
/* Return the cost of forcibly inserting a barrier after INSN.  */
static int
arm_barrier_cost (rtx insn)
{
  /* Basing the location of the pool on the loop depth is preferable,
     but at the moment, the basic block information seems to be
     corrupt by this stage of the compilation.  */
  int base_cost = 50;
  rtx next = next_nonnote_insn (insn);

  if (next != NULL && GET_CODE (next) == CODE_LABEL)
    base_cost -= 20;

  switch (GET_CODE (insn))
    {
    case CODE_LABEL:
      /* It will always be better to place the table before the label, rather
         than after it.  */
      return 50;

    case INSN:
    case CALL_INSN:
      return base_cost;

    case JUMP_INSN:
      return base_cost - 10;

    default:
      return base_cost + 10;
    }
}
/* Find the best place in the insn stream in the range
   (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
   Create the barrier by inserting a jump and add a new fix entry for
   it.  */
static Mfix *
create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
{
  HOST_WIDE_INT count = 0;
  rtx barrier;
  rtx from = fix->insn;
  rtx selected = from;
  int selected_cost;
  HOST_WIDE_INT selected_address;
  Mfix * new_fix;
  HOST_WIDE_INT max_count = max_address - fix->address;
  rtx label = gen_label_rtx ();

  selected_cost = arm_barrier_cost (from);
  selected_address = fix->address;

  while (from && count < max_count)
    {
      rtx tmp;
      int new_cost;

      /* This code shouldn't have been called if there was a natural barrier
         within range.  */
      gcc_assert (GET_CODE (from) != BARRIER);

      /* Count the length of this insn.  */
      count += get_attr_length (from);

      /* If there is a jump table, add its length.  */
      tmp = is_jump_table (from);
      if (tmp != NULL)
        {
          count += get_jump_table_size (tmp);

          /* Jump tables aren't in a basic block, so base the cost on
             the dispatch insn.  If we select this location, we will
             still put the pool after the table.  */
          new_cost = arm_barrier_cost (from);

          if (count < max_count && new_cost <= selected_cost)
            {
              selected = tmp;
              selected_cost = new_cost;
              selected_address = fix->address + count;
            }

          /* Continue after the dispatch table.  */
          from = NEXT_INSN (tmp);
          continue;
        }

      new_cost = arm_barrier_cost (from);

      if (count < max_count && new_cost <= selected_cost)
        {
          selected = from;
          selected_cost = new_cost;
          selected_address = fix->address + count;
        }

      from = NEXT_INSN (from);
    }

  /* Create a new JUMP_INSN that branches around a barrier.  */
  from = emit_jump_insn_after (gen_jump (label), selected);
  JUMP_LABEL (from) = label;
  barrier = emit_barrier_after (from);
  emit_label_after (label, barrier);

  /* Create a minipool barrier entry for the new barrier.  */
  new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
  new_fix->insn = barrier;
  new_fix->address = selected_address;
  new_fix->next = fix->next;
  fix->next = new_fix;

  return new_fix;
}
/* Record that there is a natural barrier in the insn stream at
   ADDRESS.  */
static void
push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

  fix->insn = insn;
  fix->address = address;

  fix->next = NULL;
  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
/* Record INSN, which will need fixing up to load a value from the
   minipool.  ADDRESS is the offset of the insn since the start of the
   function; LOC is a pointer to the part of the insn which requires
   fixing; VALUE is the constant that must be loaded, which is of type
   MODE.  */
static void
push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
                   enum machine_mode mode, rtx value)
{
  Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));

#ifdef AOF_ASSEMBLER
  /* PIC symbol references need to be converted into offsets into the
     based area.  */
  /* XXX This shouldn't be done here.  */
  if (flag_pic && GET_CODE (value) == SYMBOL_REF)
    value = aof_pic_entry (value);
#endif /* AOF_ASSEMBLER */

  fix->insn = insn;
  fix->address = address;
  fix->loc = loc;
  fix->mode = mode;
  fix->value = value;
  fix->fix_size = MINIPOOL_FIX_SIZE (mode);
  fix->forwards = get_attr_pool_range (insn);
  fix->backwards = get_attr_neg_pool_range (insn);
  fix->minipool = NULL;

  /* If an insn doesn't have a range defined for it, then it isn't
     expecting to be reworked by this code.  Better to stop now than
     to generate duff assembly code.  */
  gcc_assert (fix->forwards || fix->backwards);

  /* If an entry requires 8-byte alignment then assume all constant pools
     require 4 bytes of padding.  Trying to do this later on a per-pool
     basis is awkward because existing pool entries have to be modified.  */
  if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
    minipool_pad = 4;

  if (dump_file)
    {
      fprintf (dump_file,
               ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
               GET_MODE_NAME (mode),
               INSN_UID (insn), (unsigned long) address,
               -1 * (long)fix->backwards, (long)fix->forwards);
      arm_print_value (dump_file, fix->value);
      fprintf (dump_file, "\n");
    }

  /* Add it to the chain of fixes.  */
  fix->next = NULL;

  if (minipool_fix_head != NULL)
    minipool_fix_tail->next = fix;
  else
    minipool_fix_head = fix;

  minipool_fix_tail = fix;
}
/* Return the cost of synthesizing a 64-bit constant VAL inline.
   Returns the number of insns needed, or 99 if we don't know how to
   do it.  */
int
arm_const_double_inline_cost (rtx val)
{
  rtx lowpart, highpart;
  enum machine_mode mode;

  mode = GET_MODE (val);

  if (mode == VOIDmode)
    mode = DImode;

  gcc_assert (GET_MODE_SIZE (mode) == 8);

  lowpart = gen_lowpart (SImode, val);
  highpart = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (lowpart) == CONST_INT);
  gcc_assert (GET_CODE (highpart) == CONST_INT);

  return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
                            NULL_RTX, NULL_RTX, 0, 0)
          + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
                              NULL_RTX, NULL_RTX, 0, 0));
}
/* Return true if it is worthwhile to split a 64-bit constant into two
   32-bit operations.  This is the case if optimizing for size, or
   if we have load delay slots, or if one 32-bit part can be done with
   a single data operation.  */
bool
arm_const_double_by_parts (rtx val)
{
  enum machine_mode mode = GET_MODE (val);
  rtx part;

  if (optimize_size || arm_ld_sched)
    return true;

  if (mode == VOIDmode)
    mode = DImode;

  part = gen_highpart_mode (SImode, mode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  part = gen_lowpart (SImode, val);

  gcc_assert (GET_CODE (part) == CONST_INT);

  if (const_ok_for_arm (INTVAL (part))
      || const_ok_for_arm (~INTVAL (part)))
    return true;

  return false;
}
/* Scan INSN and note any of its operands that need fixing.
   If DO_PUSHES is false we do not actually push any of the fixups
   needed.  The function returns TRUE if any fixups were needed/pushed.
   This is used by arm_memory_load_p() which needs to know about loads
   of constants that will be converted into minipool loads.  */
static bool
note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
{
  bool result = false;
  int opno;

  extract_insn (insn);

  if (!constrain_operands (1))
    fatal_insn_not_found (insn);

  if (recog_data.n_alternatives == 0)
    return false;

  /* Fill in recog_op_alt with information about the constraints of
     this insn.  */
  preprocess_constraints ();

  for (opno = 0; opno < recog_data.n_operands; opno++)
    {
      /* Things we need to fix can only occur in inputs.  */
      if (recog_data.operand_type[opno] != OP_IN)
        continue;

      /* If this alternative is a memory reference, then any mention
         of constants in this alternative is really to fool reload
         into allowing us to accept one there.  We need to fix them up
         now so that we output the right code.  */
      if (recog_op_alt[opno][which_alternative].memory_ok)
        {
          rtx op = recog_data.operand[opno];

          if (CONSTANT_P (op))
            {
              if (do_pushes)
                push_minipool_fix (insn, address, recog_data.operand_loc[opno],
                                   recog_data.operand_mode[opno], op);
              result = true;
            }
          else if (GET_CODE (op) == MEM
                   && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
                   && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
            {
              if (do_pushes)
                {
                  rtx cop = avoid_constant_pool_reference (op);

                  /* Casting the address of something to a mode narrower
                     than a word can cause avoid_constant_pool_reference()
                     to return the pool reference itself.  That's no good to
                     us here.  Lets just hope that we can use the
                     constant pool value directly.  */
                  if (op == cop)
                    cop = get_pool_constant (XEXP (op, 0));

                  push_minipool_fix (insn, address,
                                     recog_data.operand_loc[opno],
                                     recog_data.operand_mode[opno], cop);
                }

              result = true;
            }
        }
    }

  return result;
}
/* Gcc puts the pool in the wrong place for ARM, since we can only
   load addresses a limited distance around the pc.  We do some
   special munging to move the constant pool values to the correct
   point in the code.  */
static void
arm_reorg (void)
{
  rtx insn;
  HOST_WIDE_INT address = 0;
  Mfix * fix;

  minipool_fix_head = minipool_fix_tail = NULL;

  /* The first insn must always be a note, or the code below won't
     scan it properly.  */
  insn = get_insns ();
  gcc_assert (GET_CODE (insn) == NOTE);
  minipool_pad = 0;

  /* Scan all the insns and record the operands that will need fixing.  */
  for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
    {
      if (TARGET_CIRRUS_FIX_INVALID_INSNS
          && (arm_cirrus_insn_p (insn)
              || GET_CODE (insn) == JUMP_INSN
              || arm_memory_load_p (insn)))
        cirrus_reorg (insn);

      if (GET_CODE (insn) == BARRIER)
        push_minipool_barrier (insn, address);
      else if (INSN_P (insn))
        {
          rtx table;

          note_invalid_constants (insn, address, true);
          address += get_attr_length (insn);

          /* If the insn is a vector jump, add the size of the table
             and skip the table.  */
          if ((table = is_jump_table (insn)) != NULL)
            {
              address += get_jump_table_size (table);
              insn = table;
            }
        }
    }

  fix = minipool_fix_head;

  /* Now scan the fixups and perform the required changes.  */
  while (fix)
    {
      Mfix * ftmp;
      Mfix * fdel;
      Mfix * last_added_fix;
      Mfix * last_barrier = NULL;
      Mfix * this_fix;

      /* Skip any further barriers before the next fix.  */
      while (fix && GET_CODE (fix->insn) == BARRIER)
        fix = fix->next;

      /* No more fixes.  */
      if (fix == NULL)
        break;

      last_added_fix = NULL;

      for (ftmp = fix; ftmp; ftmp = ftmp->next)
        {
          if (GET_CODE (ftmp->insn) == BARRIER)
            {
              if (ftmp->address >= minipool_vector_head->max_address)
                break;

              last_barrier = ftmp;
            }
          else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
            break;

          last_added_fix = ftmp;  /* Keep track of the last fix added.  */
        }

      /* If we found a barrier, drop back to that; any fixes that we
         could have reached but come after the barrier will now go in
         the next mini-pool.  */
      if (last_barrier != NULL)
        {
          /* Reduce the refcount for those fixes that won't go into this
             pool after all.  */
          for (fdel = last_barrier->next;
               fdel && fdel != ftmp;
               fdel = fdel->next)
            {
              fdel->minipool->refcount--;
              fdel->minipool = NULL;
            }

          ftmp = last_barrier;
        }
      else
        {
          /* ftmp is first fix that we can't fit into this pool and
             there no natural barriers that we could use.  Insert a
             new barrier in the code somewhere between the previous
             fix and this one, and arrange to jump around it.  */
          HOST_WIDE_INT max_address;

          /* The last item on the list of fixes must be a barrier, so
             we can never run off the end of the list of fixes without
             last_barrier being set.  */
          gcc_assert (ftmp);

          max_address = minipool_vector_head->max_address;
          /* Check that there isn't another fix that is in range that
             we couldn't fit into this pool because the pool was
             already too large: we need to put the pool before such an
             instruction.  */
          if (ftmp->address < max_address)
            max_address = ftmp->address;

          last_barrier = create_fix_barrier (last_added_fix, max_address);
        }

      assign_minipool_offsets (last_barrier);

      while (ftmp)
        {
          if (GET_CODE (ftmp->insn) != BARRIER
              && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
                  == NULL))
            break;

          ftmp = ftmp->next;
        }

      /* Scan over the fixes we have identified for this pool, fixing them
         up and adding the constants to the pool itself.  */
      for (this_fix = fix; this_fix && ftmp != this_fix;
           this_fix = this_fix->next)
        if (GET_CODE (this_fix->insn) != BARRIER)
          {
            rtx addr
              = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
                                                  minipool_vector_label),
                               this_fix->minipool->offset);
            *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
          }

      dump_minipool (last_barrier->insn);
      fix = ftmp;
    }

  /* From now on we must synthesize any constants that we can't handle
     directly.  This can happen if the RTL gets split during final
     instruction generation.  */
  after_arm_reorg = 1;

  /* Free the minipool memory.  */
  obstack_free (&minipool_obstack, minipool_startobj);
}
/* Routines to output assembly language.  */

/* If the rtx is the correct value then return the string of the number.
   In this way we can ensure that valid double constants are generated even
   when cross compiling.  */
const char *
fp_immediate_constant (rtx x)
{
  REAL_VALUE_TYPE r;
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  REAL_VALUE_FROM_CONST_DOUBLE (r, x);
  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (r, values_fp[i]))
      return strings_fp[i];

  gcc_unreachable ();
}

/* As for fp_immediate_constant, but value is passed directly, not in rtx.  */
static const char *
fp_const_from_val (REAL_VALUE_TYPE *r)
{
  int i;

  if (!fp_consts_inited)
    init_fp_table ();

  for (i = 0; i < 8; i++)
    if (REAL_VALUES_EQUAL (*r, values_fp[i]))
      return strings_fp[i];

  gcc_unreachable ();
}
/* Output the operands of a LDM/STM instruction to STREAM.
   MASK is the ARM register set mask of which only bits 0-15 are important.
   REG is the base register, either the frame pointer or the stack pointer,
   INSTR is the possibly suffixed load or store instruction.  */
static void
print_multi_reg (FILE *stream, const char *instr, unsigned reg,
                 unsigned long mask)
{
  unsigned i;
  bool not_first = FALSE;

  fputc ('\t', stream);
  asm_fprintf (stream, instr, reg);
  fputs (", {", stream);

  for (i = 0; i <= LAST_ARM_REGNUM; i++)
    if (mask & (1 << i))
      {
        if (not_first)
          fprintf (stream, ", ");

        asm_fprintf (stream, "%r", i);
        not_first = TRUE;
      }

  fprintf (stream, "}\n");
}
/* Output a FLDMX instruction to STREAM.
   BASE is the register containing the address.
   REG and COUNT specify the register range.
   Extra registers may be added to avoid hardware bugs.  */
static void
arm_output_fldmx (FILE * stream, unsigned int base, int reg, int count)
{
  int i;

  /* Workaround ARM10 VFPr1 bug.  */
  if (count == 2 && !arm_arch6)
    {
      if (reg == 15)
        reg--;
      count++;
    }

  fputc ('\t', stream);
  asm_fprintf (stream, "fldmfdx\t%r!, {", base);

  for (i = reg; i < reg + count; i++)
    {
      if (i > reg)
        fputs (", ", stream);
      asm_fprintf (stream, "d%d", i);
    }

  fputs ("}\n", stream);
}
/* Output the assembly for a store multiple.  */
const char *
vfp_output_fstmx (rtx * operands)
{
  char pattern[100];
  int p;
  int base;
  int i;

  strcpy (pattern, "fstmfdx\t%m0!, {%P1");
  p = strlen (pattern);

  gcc_assert (GET_CODE (operands[1]) == REG);

  base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
  for (i = 1; i < XVECLEN (operands[2], 0); i++)
    p += sprintf (&pattern[p], ", d%d", base + i);

  strcpy (&pattern[p], "}");

  output_asm_insn (pattern, operands);
  return "";
}
/* Emit RTL to save block of VFP register pairs to the stack.  Returns the
   number of bytes pushed.  */
static int
vfp_emit_fstmx (int base_reg, int count)
{
  rtx par;
  rtx dwarf;
  rtx tmp, reg;
  int i;

  /* Workaround ARM10 VFPr1 bug.  Data corruption can occur when exactly two
     register pairs are stored by a store multiple insn.  We avoid this
     by pushing an extra pair.  */
  if (count == 2 && !arm_arch6)
    {
      if (base_reg == LAST_VFP_REGNUM - 3)
        base_reg -= 2;
      count++;
    }

  /* ??? The frame layout is implementation defined.  We describe
     standard format 1 (equivalent to a FSTMD insn and unused pad word).
     We really need some way of representing the whole block so that the
     unwinder can figure it out at runtime.  */
  par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
  dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));

  reg = gen_rtx_REG (DFmode, base_reg);
  base_reg += 2;

  XVECEXP (par, 0, 0)
    = gen_rtx_SET (VOIDmode,
                   gen_frame_mem (BLKmode,
                                  gen_rtx_PRE_DEC (BLKmode,
                                                   stack_pointer_rtx)),
                   gen_rtx_UNSPEC (BLKmode,
                                   gen_rtvec (1, reg),
                                   UNSPEC_PUSH_MULT));

  tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
                     gen_rtx_PLUS (SImode, stack_pointer_rtx,
                                   GEN_INT (-(count * 8 + 4))));
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 0) = tmp;

  tmp = gen_rtx_SET (VOIDmode,
                     gen_frame_mem (DFmode, stack_pointer_rtx),
                     reg);
  RTX_FRAME_RELATED_P (tmp) = 1;
  XVECEXP (dwarf, 0, 1) = tmp;

  for (i = 1; i < count; i++)
    {
      reg = gen_rtx_REG (DFmode, base_reg);
      base_reg += 2;
      XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);

      tmp = gen_rtx_SET (VOIDmode,
                         gen_frame_mem (DFmode,
                                        gen_rtx_PLUS (SImode,
                                                      stack_pointer_rtx,
                                                      GEN_INT (i * 8))),
                         reg);
      RTX_FRAME_RELATED_P (tmp) = 1;
      XVECEXP (dwarf, 0, i + 1) = tmp;
    }

  par = emit_insn (par);
  REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
                                       REG_NOTES (par));
  RTX_FRAME_RELATED_P (par) = 1;

  return count * 8 + 4;
}
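
/* Worked example (added note): with count == 3 this pushes three D-register
   pairs and reports 3 * 8 + 4 == 28 bytes of stack, the extra 4 bytes being
   the unused pad word of the FSTMX "standard format 1" described above.  */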
/* Output a 'call' insn.  */
const char *
output_call (rtx *operands)
{
  gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly.  */

  /* Handle calls to lr using ip (which may be clobbered in subr anyway).  */
  if (REGNO (operands[0]) == LR_REGNUM)
    {
      operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
      output_asm_insn ("mov%?\t%0, %|lr", operands);
    }

  output_asm_insn ("mov%?\t%|lr, %|pc", operands);

  if (TARGET_INTERWORK || arm_arch4t)
    output_asm_insn ("bx%?\t%0", operands);
  else
    output_asm_insn ("mov%?\t%|pc, %0", operands);

  return "";
}
/* Output a 'call' insn that is a reference in memory.  */
const char *
output_call_mem (rtx *operands)
{
  if (TARGET_INTERWORK && !arm_arch5)
    {
      output_asm_insn ("ldr%?\t%|ip, %0", operands);
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      output_asm_insn ("bx%?\t%|ip", operands);
    }
  else if (regno_use_in (LR_REGNUM, operands[0]))
    {
      /* LR is used in the memory address.  We load the address in the
         first instruction.  It's safe to use IP as the target of the
         load since the call will kill it anyway.  */
      output_asm_insn ("ldr%?\t%|ip, %0", operands);
      if (arm_arch5)
        output_asm_insn ("blx%?\t%|ip", operands);
      else
        {
          output_asm_insn ("mov%?\t%|lr, %|pc", operands);
          if (arm_arch4t)
            output_asm_insn ("bx%?\t%|ip", operands);
          else
            output_asm_insn ("mov%?\t%|pc, %|ip", operands);
        }
    }
  else
    {
      output_asm_insn ("mov%?\t%|lr, %|pc", operands);
      output_asm_insn ("ldr%?\t%|pc, %0", operands);
    }

  return "";
}
/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair.  */
const char *
output_mov_long_double_fpa_from_arm (rtx *operands)
{
  int arm_reg0 = REGNO (operands[1]);
  rtx ops[3];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);

  output_asm_insn ("stm%?fd\t%|sp!, {%0, %1, %2}", ops);
  output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);

  return "";
}
/* Output a move from an fpa register to arm registers.
   OPERANDS[0] is the first register of an arm register pair.
   OPERANDS[1] is an fpa register.  */
const char *
output_mov_long_double_arm_from_fpa (rtx *operands)
{
  int arm_reg0 = REGNO (operands[0]);
  rtx ops[3];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);

  output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
  output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1, %2}", ops);

  return "";
}
/* Output a move from arm registers to arm registers of a long double
   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.  */
const char *
output_mov_long_double_arm_from_arm (rtx *operands)
{
  /* We have to be careful here because the two might overlap.  */
  int dest_start = REGNO (operands[0]);
  int src_start = REGNO (operands[1]);
  rtx ops[2];
  int i;

  if (dest_start < src_start)
    {
      for (i = 0; i < 3; i++)
        {
          ops[0] = gen_rtx_REG (SImode, dest_start + i);
          ops[1] = gen_rtx_REG (SImode, src_start + i);
          output_asm_insn ("mov%?\t%0, %1", ops);
        }
    }
  else
    {
      for (i = 2; i >= 0; i--)
        {
          ops[0] = gen_rtx_REG (SImode, dest_start + i);
          ops[1] = gen_rtx_REG (SImode, src_start + i);
          output_asm_insn ("mov%?\t%0, %1", ops);
        }
    }

  return "";
}
/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair.  */
const char *
output_mov_double_fpa_from_arm (rtx *operands)
{
  int arm_reg0 = REGNO (operands[1]);
  rtx ops[2];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  output_asm_insn ("stm%?fd\t%|sp!, {%0, %1}", ops);
  output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);

  return "";
}
/* Output a move from an fpa register to arm registers.
   OPERANDS[0] is the first register of an arm register pair.
   OPERANDS[1] is an fpa register.  */
const char *
output_mov_double_arm_from_fpa (rtx *operands)
{
  int arm_reg0 = REGNO (operands[0]);
  rtx ops[2];

  gcc_assert (arm_reg0 != IP_REGNUM);

  ops[0] = gen_rtx_REG (SImode, arm_reg0);
  ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
  output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
  output_asm_insn ("ldm%?fd\t%|sp!, {%0, %1}", ops);

  return "";
}
/* Output a move between double words.
   It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
   or MEM<-REG and all MEMs must be offsettable addresses.  */
const char *
output_move_double (rtx *operands)
{
  enum rtx_code code0 = GET_CODE (operands[0]);
  enum rtx_code code1 = GET_CODE (operands[1]);
  rtx otherops[3];

  if (code0 == REG)
    {
      int reg0 = REGNO (operands[0]);

      otherops[0] = gen_rtx_REG (SImode, 1 + reg0);

      gcc_assert (code1 == MEM);  /* Constraints should ensure this.  */

      switch (GET_CODE (XEXP (operands[1], 0)))
        {
        case REG:
          output_asm_insn ("ldm%?ia\t%m1, %M0", operands);
          break;

        case PRE_INC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("ldr%?d\t%0, [%m1, #8]!", operands);
          break;

        case PRE_DEC:
          output_asm_insn ("ldm%?db\t%m1!, %M0", operands);
          break;

        case POST_INC:
          output_asm_insn ("ldm%?ia\t%m1!, %M0", operands);
          break;

        case POST_DEC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("ldr%?d\t%0, [%m1], #-8", operands);
          break;

        case PRE_MODIFY:
        case POST_MODIFY:
          otherops[0] = operands[0];
          otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
          otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);

          if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
            {
              if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
                {
                  /* Registers overlap so split out the increment.  */
                  output_asm_insn ("add%?\t%1, %1, %2", otherops);
                  output_asm_insn ("ldr%?d\t%0, [%1] @split", otherops);
                }
              else
                output_asm_insn ("ldr%?d\t%0, [%1, %2]!", otherops);
            }
          else
            {
              /* We only allow constant increments, so this is safe.  */
              output_asm_insn ("ldr%?d\t%0, [%1], %2", otherops);
            }
          break;

        case LABEL_REF:
        case CONST:
          output_asm_insn ("adr%?\t%0, %1", operands);
          output_asm_insn ("ldm%?ia\t%0, %M0", operands);
          break;

        default:
          if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
                               GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
            {
              otherops[0] = operands[0];
              otherops[1] = XEXP (XEXP (operands[1], 0), 0);
              otherops[2] = XEXP (XEXP (operands[1], 0), 1);

              if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
                {
                  if (GET_CODE (otherops[2]) == CONST_INT)
                    {
                      switch ((int) INTVAL (otherops[2]))
                        {
                        case -8:
                          output_asm_insn ("ldm%?db\t%1, %M0", otherops);
                          return "";
                        case -4:
                          output_asm_insn ("ldm%?da\t%1, %M0", otherops);
                          return "";
                        case 4:
                          output_asm_insn ("ldm%?ib\t%1, %M0", otherops);
                          return "";
                        }
                    }
                  if (TARGET_LDRD
                      && (GET_CODE (otherops[2]) == REG
                          || (GET_CODE (otherops[2]) == CONST_INT
                              && INTVAL (otherops[2]) > -256
                              && INTVAL (otherops[2]) < 256)))
                    {
                      if (reg_overlap_mentioned_p (otherops[0],
                                                   otherops[2]))
                        {
                          /* Swap base and index registers over to
                             avoid a conflict.  */
                          otherops[1] = XEXP (XEXP (operands[1], 0), 1);
                          otherops[2] = XEXP (XEXP (operands[1], 0), 0);
                        }
                      /* If both registers conflict, it will usually
                         have been fixed by a splitter.  */
                      if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
                        {
                          output_asm_insn ("add%?\t%1, %1, %2", otherops);
                          output_asm_insn ("ldr%?d\t%0, [%1]",
                                           otherops);
                        }
                      else
                        output_asm_insn ("ldr%?d\t%0, [%1, %2]", otherops);
                      return "";
                    }

                  if (GET_CODE (otherops[2]) == CONST_INT)
                    {
                      if (!(const_ok_for_arm (INTVAL (otherops[2]))))
                        output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
                      else
                        output_asm_insn ("add%?\t%0, %1, %2", otherops);
                    }
                  else
                    output_asm_insn ("add%?\t%0, %1, %2", otherops);
                }
              else
                output_asm_insn ("sub%?\t%0, %1, %2", otherops);

              return "ldm%?ia\t%0, %M0";
            }
          else
            {
              otherops[1] = adjust_address (operands[1], SImode, 4);
              /* Take care of overlapping base/data reg.  */
              if (reg_mentioned_p (operands[0], operands[1]))
                {
                  output_asm_insn ("ldr%?\t%0, %1", otherops);
                  output_asm_insn ("ldr%?\t%0, %1", operands);
                }
              else
                {
                  output_asm_insn ("ldr%?\t%0, %1", operands);
                  output_asm_insn ("ldr%?\t%0, %1", otherops);
                }
            }
        }
    }
  else
    {
      /* Constraints should ensure this.  */
      gcc_assert (code0 == MEM && code1 == REG);
      gcc_assert (REGNO (operands[1]) != IP_REGNUM);

      switch (GET_CODE (XEXP (operands[0], 0)))
        {
        case REG:
          output_asm_insn ("stm%?ia\t%m0, %M1", operands);
          break;

        case PRE_INC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("str%?d\t%1, [%m0, #8]!", operands);
          break;

        case PRE_DEC:
          output_asm_insn ("stm%?db\t%m0!, %M1", operands);
          break;

        case POST_INC:
          output_asm_insn ("stm%?ia\t%m0!, %M1", operands);
          break;

        case POST_DEC:
          gcc_assert (TARGET_LDRD);
          output_asm_insn ("str%?d\t%1, [%m0], #-8", operands);
          break;

        case PRE_MODIFY:
        case POST_MODIFY:
          otherops[0] = operands[1];
          otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
          otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);

          if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
            output_asm_insn ("str%?d\t%0, [%1, %2]!", otherops);
          else
            output_asm_insn ("str%?d\t%0, [%1], %2", otherops);
          break;

        case PLUS:
          otherops[2] = XEXP (XEXP (operands[0], 0), 1);
          if (GET_CODE (otherops[2]) == CONST_INT)
            {
              switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
                {
                case -8:
                  output_asm_insn ("stm%?db\t%m0, %M1", operands);
                  return "";

                case -4:
                  output_asm_insn ("stm%?da\t%m0, %M1", operands);
                  return "";

                case 4:
                  output_asm_insn ("stm%?ib\t%m0, %M1", operands);
                  return "";
                }
            }
          if (TARGET_LDRD
              && (GET_CODE (otherops[2]) == REG
                  || (GET_CODE (otherops[2]) == CONST_INT
                      && INTVAL (otherops[2]) > -256
                      && INTVAL (otherops[2]) < 256)))
            {
              otherops[0] = operands[1];
              otherops[1] = XEXP (XEXP (operands[0], 0), 0);
              output_asm_insn ("str%?d\t%0, [%1, %2]", otherops);
              return "";
            }
          /* Fall through */

        default:
          otherops[0] = adjust_address (operands[0], SImode, 4);
          otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
          output_asm_insn ("str%?\t%1, %0", operands);
          output_asm_insn ("str%?\t%1, %0", otherops);
        }
    }

  return "";
}
/* Output an ADD r, s, #n where n may be too big for one instruction.
   If adding zero to one register, output nothing.  */
const char *
output_add_immediate (rtx *operands)
{
  HOST_WIDE_INT n = INTVAL (operands[2]);

  if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
    {
      if (n < 0)
        output_multi_immediate (operands,
                                "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
                                -n);
      else
        output_multi_immediate (operands,
                                "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
                                n);
    }

  return "";
}
/* Output a multiple immediate operation.
   OPERANDS is the vector of operands referred to in the output patterns.
   INSTR1 is the output pattern to use for the first constant.
   INSTR2 is the output pattern to use for subsequent constants.
   IMMED_OP is the index of the constant slot in OPERANDS.
   N is the constant value.  */
static const char *
output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
                        int immed_op, HOST_WIDE_INT n)
{
#if HOST_BITS_PER_WIDE_INT > 32
  n &= 0xffffffff;
#endif

  if (n == 0)
    {
      /* Quick and easy output.  */
      operands[immed_op] = const0_rtx;
      output_asm_insn (instr1, operands);
    }
  else
    {
      int i;
      const char * instr = instr1;

      /* Note that n is never zero here (which would give no output).  */
      for (i = 0; i < 32; i += 2)
        {
          if (n & (3 << i))
            {
              operands[immed_op] = GEN_INT (n & (255 << i));
              output_asm_insn (instr, operands);
              instr = instr2;
              i += 6;
            }
        }
    }

  return "";
}
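
/* Worked example (added note): for n == 0x10004 the loop above splits the
   constant into the 8-bit chunks 0x4 and 0x10000, so an
   "add r0, r1, #0x10004" request is emitted as "add r0, r1, #4" followed by
   "add r0, r0, #65536", the second instruction using INSTR2.  */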
/* Return the appropriate ARM instruction for the operation code.
   The returned result should not be overwritten.  OP is the rtx of the
   operation.  SHIFT_FIRST_ARG is TRUE if the first argument of the operator
   was shifted.  */
const char *
arithmetic_instr (rtx op, int shift_first_arg)
{
  switch (GET_CODE (op))
    {
    case PLUS:
      return "add";

    case MINUS:
      return shift_first_arg ? "rsb" : "sub";

    case IOR:
      return "orr";

    case XOR:
      return "eor";

    case AND:
      return "and";

    default:
      gcc_unreachable ();
    }
}
/* Ensure valid constant shifts and return the appropriate shift mnemonic
   for the operation code.  The returned result should not be overwritten.
   OP is the rtx code of the shift.
   On exit, *AMOUNTP will be -1 if the shift is by a register, or a constant
   shift.  */
static const char *
shift_op (rtx op, HOST_WIDE_INT *amountp)
{
  const char * mnem;
  enum rtx_code code = GET_CODE (op);

  switch (GET_CODE (XEXP (op, 1)))
    {
    case REG:
    case SUBREG:
      *amountp = -1;
      break;

    case CONST_INT:
      *amountp = INTVAL (XEXP (op, 1));
      break;

    default:
      gcc_unreachable ();
    }

  switch (code)
    {
    case ASHIFT:
      mnem = "asl";
      break;

    case ASHIFTRT:
      mnem = "asr";
      break;

    case LSHIFTRT:
      mnem = "lsr";
      break;

    case ROTATE:
      gcc_assert (*amountp != -1);
      *amountp = 32 - *amountp;
      /* Fall through.  */

    case ROTATERT:
      mnem = "ror";
      break;

    case MULT:
      /* We never have to worry about the amount being other than a
         power of 2, since this case can never be reloaded from a reg.  */
      gcc_assert (*amountp != -1);
      *amountp = int_log2 (*amountp);
      return "asl";

    default:
      gcc_unreachable ();
    }

  if (*amountp != -1)
    {
      /* This is not 100% correct, but follows from the desire to merge
         multiplication by a power of 2 with the recognizer for a
         shift.  >=32 is not a valid shift for "asl", so we must try and
         output a shift that produces the correct arithmetical result.
         Using lsr #32 is identical except for the fact that the carry bit
         is not set correctly if we set the flags; but we never use the
         carry bit from such an operation, so we can ignore that.  */
      if (code == ROTATERT)
        /* Rotate is just modulo 32.  */
        *amountp &= 31;
      else if (*amountp != (*amountp & 31))
/* Obtain the shift from the POWER of two.  */

static HOST_WIDE_INT
int_log2 (HOST_WIDE_INT power)
{
  HOST_WIDE_INT shift = 0;

  while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
    {
      gcc_assert (shift <= 31);
      shift++;
    }

  return shift;
}
/* Output a .ascii pseudo-op, keeping track of lengths.  This is
   because /bin/as is horribly restrictive.  The judgement about
   whether or not each character is 'printable' (and can be output as
   is) or not (and must be printed with an octal escape) must be made
   with reference to the *host* character set -- the situation is
   similar to that discussed in the comments above pp_c_char in
   c-pretty-print.c.  */

#define MAX_ASCII_LEN 51
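
/* Added illustration: a call such as
     output_ascii_pseudo_op (stream, (const unsigned char *) "say \"hi\"", 8);
   emits a single directive
     .ascii "say \"hi\""
   with the embedded quotes escaped; strings longer than MAX_ASCII_LEN
   characters are split across several .ascii directives by the code below.  */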
void
output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
{
  int i;
  int len_so_far = 0;

  fputs ("\t.ascii\t\"", stream);

  for (i = 0; i < len; i++)
    {
      int c = p[i];

      if (len_so_far >= MAX_ASCII_LEN)
        {
          fputs ("\"\n\t.ascii\t\"", stream);
          len_so_far = 0;
        }

      if (ISPRINT (c))
        {
          if (c == '\\' || c == '\"')
            {
              putc ('\\', stream);
              len_so_far++;
            }
          putc (c, stream);
          len_so_far++;
        }
      else
        {
          fprintf (stream, "\\%03o", c);
          len_so_far += 4;
        }
    }

  fputs ("\"\n", stream);
}
9158 /* Compute the register save mask for registers 0 through 12
9159 inclusive. This code is used by arm_compute_save_reg_mask. */
9161 static unsigned long
9162 arm_compute_save_reg0_reg12_mask (void)
9164 unsigned long func_type
= arm_current_func_type ();
9165 unsigned long save_reg_mask
= 0;
9168 if (IS_INTERRUPT (func_type
))
9170 unsigned int max_reg
;
9171 /* Interrupt functions must not corrupt any registers,
9172 even call clobbered ones. If this is a leaf function
9173 we can just examine the registers used by the RTL, but
9174 otherwise we have to assume that whatever function is
9175 called might clobber anything, and so we have to save
9176 all the call-clobbered registers as well. */
9177 if (ARM_FUNC_TYPE (func_type
) == ARM_FT_FIQ
)
/* FIQ handlers have registers r8 - r12 banked, so
   we only need to check r0 - r7.  Normal ISRs only
   bank r14 and r15, so we must check up to r12.
   r13 is the stack pointer which is always preserved,
   so we do not need to consider it here.  */
9187 for (reg
= 0; reg
<= max_reg
; reg
++)
9188 if (regs_ever_live
[reg
]
9189 || (! current_function_is_leaf
&& call_used_regs
[reg
]))
9190 save_reg_mask
|= (1 << reg
);
9192 /* Also save the pic base register if necessary. */
9194 && !TARGET_SINGLE_PIC_BASE
9195 && current_function_uses_pic_offset_table
)
9196 save_reg_mask
|= 1 << PIC_OFFSET_TABLE_REGNUM
;
9200 /* In the normal case we only need to save those registers
9201 which are call saved and which are used by this function. */
9202 for (reg
= 0; reg
<= 10; reg
++)
9203 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
9204 save_reg_mask
|= (1 << reg
);
9206 /* Handle the frame pointer as a special case. */
9207 if (! TARGET_APCS_FRAME
9208 && ! frame_pointer_needed
9209 && regs_ever_live
[HARD_FRAME_POINTER_REGNUM
]
9210 && ! call_used_regs
[HARD_FRAME_POINTER_REGNUM
])
9211 save_reg_mask
|= 1 << HARD_FRAME_POINTER_REGNUM
;
9213 /* If we aren't loading the PIC register,
9214 don't stack it even though it may be live. */
9216 && !TARGET_SINGLE_PIC_BASE
9217 && (regs_ever_live
[PIC_OFFSET_TABLE_REGNUM
]
9218 || current_function_uses_pic_offset_table
))
9219 save_reg_mask
|= 1 << PIC_OFFSET_TABLE_REGNUM
;
9222 /* Save registers so the exception handler can modify them. */
9223 if (current_function_calls_eh_return
)
9229 reg
= EH_RETURN_DATA_REGNO (i
);
9230 if (reg
== INVALID_REGNUM
)
9232 save_reg_mask
|= 1 << reg
;
9236 return save_reg_mask
;
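/* A standalone sketch (hypothetical helper, not the GCC function) of the
   interrupt-handler rule above: FIQ handlers have r8 - r12 banked so only
   r0 - r7 need checking, other handlers must check up to r12, and in a
   non-leaf handler every call-clobbered register must be saved too.  */
#if 0
static unsigned long
example_isr_save_mask (unsigned long regs_live, unsigned long call_clobbered,
                       int is_fiq, int is_leaf)
{
  unsigned int max_reg = is_fiq ? 7 : 12;   /* r8 - r12 banked for FIQ */
  unsigned long mask = 0;
  unsigned int reg;

  for (reg = 0; reg <= max_reg; reg++)
    if ((regs_live & (1UL << reg))
        || (!is_leaf && (call_clobbered & (1UL << reg))))
      mask |= 1UL << reg;

  return mask;
}
#endif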
9239 /* Compute a bit mask of which registers need to be
9240 saved on the stack for the current function. */
9242 static unsigned long
9243 arm_compute_save_reg_mask (void)
9245 unsigned int save_reg_mask
= 0;
9246 unsigned long func_type
= arm_current_func_type ();
9248 if (IS_NAKED (func_type
))
9249 /* This should never really happen. */
9252 /* If we are creating a stack frame, then we must save the frame pointer,
9253 IP (which will hold the old stack pointer), LR and the PC. */
9254 if (frame_pointer_needed
)
9256 (1 << ARM_HARD_FRAME_POINTER_REGNUM
)
9261 /* Volatile functions do not return, so there
9262 is no need to save any other registers. */
9263 if (IS_VOLATILE (func_type
))
9264 return save_reg_mask
;
9266 save_reg_mask
|= arm_compute_save_reg0_reg12_mask ();
9268 /* Decide if we need to save the link register.
9269 Interrupt routines have their own banked link register,
9270 so they never need to save it.
9271 Otherwise if we do not use the link register we do not need to save
9272 it. If we are pushing other registers onto the stack however, we
9273 can save an instruction in the epilogue by pushing the link register
9274 now and then popping it back into the PC. This incurs extra memory
9275 accesses though, so we only do it when optimizing for size, and only
9276 if we know that we will not need a fancy return sequence. */
9277 if (regs_ever_live
[LR_REGNUM
]
9280 && ARM_FUNC_TYPE (func_type
) == ARM_FT_NORMAL
9281 && !current_function_calls_eh_return
))
9282 save_reg_mask
|= 1 << LR_REGNUM
;
9284 if (cfun
->machine
->lr_save_eliminated
)
9285 save_reg_mask
&= ~ (1 << LR_REGNUM
);
9287 if (TARGET_REALLY_IWMMXT
9288 && ((bit_count (save_reg_mask
)
9289 + ARM_NUM_INTS (current_function_pretend_args_size
)) % 2) != 0)
/* The total number of registers that are going to be pushed
   onto the stack is odd.  We need to ensure that the stack
   is 64-bit aligned before we start to save iWMMXt registers,
   and also before we start to create locals.  (A local variable
   might be a double or long long which we will load/store using
   an iWMMXt instruction).  Therefore we need to push another
   ARM register, so that the stack will be 64-bit aligned.  We
   try to avoid using the arg registers (r0 - r3) as they might be
   used to pass values in a tail call.  */
9302 for (reg
= 4; reg
<= 12; reg
++)
9303 if ((save_reg_mask
& (1 << reg
)) == 0)
9307 save_reg_mask
|= (1 << reg
);
9310 cfun
->machine
->sibcall_blocked
= 1;
9311 save_reg_mask
|= (1 << 3);
9315 return save_reg_mask
;
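/* A standalone sketch (not GCC code) of the padding rule above.  MASK has
   one bit per core register; PRETEND_WORDS is the number of words of
   pre-pushed arguments.  If the total is odd, one extra register is pushed
   so the stack stays 64-bit aligned, preferring r4 - r12 and falling back
   to r3 (which, as in the real code above, would block sibling calls).  */
#if 0
static unsigned long
example_pad_mask_for_8byte_alignment (unsigned long mask, int pretend_words)
{
  int count = 0, reg;
  unsigned long m;

  for (m = mask; m != 0; m &= m - 1)
    count++;                            /* population count */

  if ((count + pretend_words) % 2 != 0)
    {
      for (reg = 4; reg <= 12; reg++)
        if ((mask & (1UL << reg)) == 0)
          return mask | (1UL << reg);

      return mask | (1UL << 3);         /* last resort: r3 */
    }

  return mask;
}
#endif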
9319 /* Compute a bit mask of which registers need to be
9320 saved on the stack for the current function. */
9321 static unsigned long
9322 thumb_compute_save_reg_mask (void)
9328 for (reg
= 0; reg
< 12; reg
++)
9329 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9333 && !TARGET_SINGLE_PIC_BASE
9334 && current_function_uses_pic_offset_table
)
9335 mask
|= 1 << PIC_OFFSET_TABLE_REGNUM
;
9337 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9338 if (!frame_pointer_needed
&& CALLER_INTERWORKING_SLOT_SIZE
> 0)
9339 mask
|= 1 << ARM_HARD_FRAME_POINTER_REGNUM
;
9341 /* LR will also be pushed if any lo regs are pushed. */
9342 if (mask
& 0xff || thumb_force_lr_save ())
9343 mask
|= (1 << LR_REGNUM
);
9345 /* Make sure we have a low work register if we need one.
9346 We will need one if we are going to push a high register,
9347 but we are not currently intending to push a low register. */
9348 if ((mask
& 0xff) == 0
9349 && ((mask
& 0x0f00) || TARGET_BACKTRACE
))
9351 /* Use thumb_find_work_register to choose which register
9352 we will use. If the register is live then we will
9353 have to push it. Use LAST_LO_REGNUM as our fallback
9354 choice for the register to select. */
9355 reg
= thumb_find_work_register (1 << LAST_LO_REGNUM
);
9357 if (! call_used_regs
[reg
])
9365 /* Return the number of bytes required to save VFP registers. */
9367 arm_get_vfp_saved_size (void)
9374 /* Space for saved VFP registers. */
9375 if (TARGET_HARD_FLOAT
&& TARGET_VFP
)
9378 for (regno
= FIRST_VFP_REGNUM
;
9379 regno
< LAST_VFP_REGNUM
;
9382 if ((!regs_ever_live
[regno
] || call_used_regs
[regno
])
9383 && (!regs_ever_live
[regno
+ 1] || call_used_regs
[regno
+ 1]))
9387 /* Workaround ARM10 VFPr1 bug. */
9388 if (count
== 2 && !arm_arch6
)
9390 saved
+= count
* 8 + 4;
9399 if (count
== 2 && !arm_arch6
)
9401 saved
+= count
* 8 + 4;
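/* A standalone sketch (not GCC code) of the size computation above: each
   contiguous block of live double-precision VFP registers is stored with a
   single fstmx, costing 8 bytes per register plus a 4-byte format word.
   The ARM10 VFPr1 workaround in the real code (padding blocks of exactly
   two registers on pre-v6 cores) is omitted here.  */
#if 0
static int
example_vfp_saved_size (unsigned int live_d_regs)
{
  int saved = 0, count = 0, d;

  for (d = 0; d < 16; d++)              /* one bit per D register d0..d15 */
    {
      if (live_d_regs & (1U << d))
        count++;
      else if (count > 0)
        {
          saved += count * 8 + 4;       /* one fstmx block */
          count = 0;
        }
    }
  if (count > 0)
    saved += count * 8 + 4;

  return saved;
}
#endif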
9408 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9409 everything bar the final return instruction. */
9411 output_return_instruction (rtx operand
, int really_return
, int reverse
)
9413 char conditional
[10];
9416 unsigned long live_regs_mask
;
9417 unsigned long func_type
;
9418 arm_stack_offsets
*offsets
;
9420 func_type
= arm_current_func_type ();
9422 if (IS_NAKED (func_type
))
9425 if (IS_VOLATILE (func_type
) && TARGET_ABORT_NORETURN
)
9427 /* If this function was declared non-returning, and we have
9428 found a tail call, then we have to trust that the called
9429 function won't return. */
9434 /* Otherwise, trap an attempted return by aborting. */
9436 ops
[1] = gen_rtx_SYMBOL_REF (Pmode
, NEED_PLT_RELOC
? "abort(PLT)"
9438 assemble_external_libcall (ops
[1]);
9439 output_asm_insn (reverse
? "bl%D0\t%a1" : "bl%d0\t%a1", ops
);
9445 gcc_assert (!current_function_calls_alloca
|| really_return
);
9447 sprintf (conditional
, "%%?%%%c0", reverse
? 'D' : 'd');
9449 return_used_this_function
= 1;
9451 live_regs_mask
= arm_compute_save_reg_mask ();
9455 const char * return_reg
;
9457 /* If we do not have any special requirements for function exit
9458 (e.g. interworking, or ISR) then we can load the return address
9459 directly into the PC. Otherwise we must load it into LR. */
9461 && ! TARGET_INTERWORK
)
9462 return_reg
= reg_names
[PC_REGNUM
];
9464 return_reg
= reg_names
[LR_REGNUM
];
9466 if ((live_regs_mask
& (1 << IP_REGNUM
)) == (1 << IP_REGNUM
))
/* There are three possible reasons for the IP register
   being saved.  1) a stack frame was created, in which case
   IP contains the old stack pointer, or 2) an ISR routine
   corrupted it, or 3) it was saved to align the stack on
   iWMMXt.  In case 1, restore IP into SP, otherwise just
   ignore it.  */
9474 if (frame_pointer_needed
)
9476 live_regs_mask
&= ~ (1 << IP_REGNUM
);
9477 live_regs_mask
|= (1 << SP_REGNUM
);
9480 gcc_assert (IS_INTERRUPT (func_type
) || TARGET_REALLY_IWMMXT
);
/* On some ARM architectures it is faster to use LDR rather than
   LDM to load a single register.  On other architectures, the
   cost is the same.  In 26 bit mode, or for exception handlers,
   we have to use LDM to load the PC so that the CPSR is also
   restored.  */
9488 for (reg
= 0; reg
<= LAST_ARM_REGNUM
; reg
++)
9489 if (live_regs_mask
== (1U << reg
))
9492 if (reg
<= LAST_ARM_REGNUM
9493 && (reg
!= LR_REGNUM
9495 || ! IS_INTERRUPT (func_type
)))
9497 sprintf (instr
, "ldr%s\t%%|%s, [%%|sp], #4", conditional
,
9498 (reg
== LR_REGNUM
) ? return_reg
: reg_names
[reg
]);
9505 /* Generate the load multiple instruction to restore the
9506 registers. Note we can get here, even if
9507 frame_pointer_needed is true, but only if sp already
9508 points to the base of the saved core registers. */
9509 if (live_regs_mask
& (1 << SP_REGNUM
))
9511 unsigned HOST_WIDE_INT stack_adjust
;
9513 offsets
= arm_get_frame_offsets ();
9514 stack_adjust
= offsets
->outgoing_args
- offsets
->saved_regs
;
9515 gcc_assert (stack_adjust
== 0 || stack_adjust
== 4);
9517 if (stack_adjust
&& arm_arch5
)
9518 sprintf (instr
, "ldm%sib\t%%|sp, {", conditional
);
9521 /* If we can't use ldmib (SA110 bug),
9522 then try to pop r3 instead. */
9524 live_regs_mask
|= 1 << 3;
9525 sprintf (instr
, "ldm%sfd\t%%|sp, {", conditional
);
9529 sprintf (instr
, "ldm%sfd\t%%|sp!, {", conditional
);
9531 p
= instr
+ strlen (instr
);
9533 for (reg
= 0; reg
<= SP_REGNUM
; reg
++)
9534 if (live_regs_mask
& (1 << reg
))
9536 int l
= strlen (reg_names
[reg
]);
9542 memcpy (p
, ", ", 2);
9546 memcpy (p
, "%|", 2);
9547 memcpy (p
+ 2, reg_names
[reg
], l
);
9551 if (live_regs_mask
& (1 << LR_REGNUM
))
9553 sprintf (p
, "%s%%|%s}", first
? "" : ", ", return_reg
);
9554 /* If returning from an interrupt, restore the CPSR. */
9555 if (IS_INTERRUPT (func_type
))
9562 output_asm_insn (instr
, & operand
);
9564 /* See if we need to generate an extra instruction to
9565 perform the actual function return. */
9567 && func_type
!= ARM_FT_INTERWORKED
9568 && (live_regs_mask
& (1 << LR_REGNUM
)) != 0)
9570 /* The return has already been handled
9571 by loading the LR into the PC. */
9578 switch ((int) ARM_FUNC_TYPE (func_type
))
9582 sprintf (instr
, "sub%ss\t%%|pc, %%|lr, #4", conditional
);
9585 case ARM_FT_INTERWORKED
:
9586 sprintf (instr
, "bx%s\t%%|lr", conditional
);
9589 case ARM_FT_EXCEPTION
:
9590 sprintf (instr
, "mov%ss\t%%|pc, %%|lr", conditional
);
9594 /* Use bx if it's available. */
9595 if (arm_arch5
|| arm_arch4t
)
9596 sprintf (instr
, "bx%s\t%%|lr", conditional
);
9598 sprintf (instr
, "mov%s\t%%|pc, %%|lr", conditional
);
9602 output_asm_insn (instr
, & operand
);
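/* A standalone sketch (not GCC code) of the return-instruction choice made
   above.  The enum is hypothetical and only mirrors the cases that matter:
   ISR/FIQ returns use "subs pc, lr, #4" (which also restores the CPSR),
   exception handlers use "movs pc, lr", interworked returns use "bx lr",
   and plain returns use "bx lr" on v4T/v5 or "mov pc, lr" otherwise.  */
#if 0
enum example_func_type { EX_FT_NORMAL, EX_FT_ISR, EX_FT_FIQ,
                         EX_FT_EXCEPTION, EX_FT_INTERWORKED };

static const char *
example_return_insn (enum example_func_type t, int have_bx /* v4T or v5 */)
{
  switch (t)
    {
    case EX_FT_ISR:
    case EX_FT_FIQ:
      return "subs\tpc, lr, #4";
    case EX_FT_EXCEPTION:
      return "movs\tpc, lr";
    case EX_FT_INTERWORKED:
      return "bx\tlr";
    default:
      return have_bx ? "bx\tlr" : "mov\tpc, lr";
    }
}
#endif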
9608 /* Write the function name into the code section, directly preceding
9609 the function prologue.
   Code will be output similar to this:

     t0
           .ascii "arm_poke_function_name", 0
           .align
     t1
           .word 0xff000000 + (t1 - t0)
     arm_poke_function_name
           mov     ip, sp
           stmfd   sp!, {fp, ip, lr, pc}
           sub     fp, ip, #4
9622 When performing a stack backtrace, code can inspect the value
9623 of 'pc' stored at 'fp' + 0. If the trace function then looks
   at location pc - 12 and the top 8 bits are set, then we know
   that there is a function name embedded immediately preceding this
   location, whose length is ((pc[-3]) & 0x00ffffff).
9628 We assume that pc is declared as a pointer to an unsigned long.
   It is of no benefit to output the function name if we are assembling
   a leaf function.  These function types will not contain a stack
   backtrace structure, therefore it is not possible to determine the
   function name.  */
9635 arm_poke_function_name (FILE *stream
, const char *name
)
9637 unsigned long alignlength
;
9638 unsigned long length
;
9641 length
= strlen (name
) + 1;
9642 alignlength
= ROUND_UP_WORD (length
);
9644 ASM_OUTPUT_ASCII (stream
, name
, length
);
9645 ASM_OUTPUT_ALIGN (stream
, 2);
9646 x
= GEN_INT ((unsigned HOST_WIDE_INT
) 0xff000000 + alignlength
);
9647 assemble_aligned_integer (UNITS_PER_WORD
, x
);
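/* A standalone sketch (not part of GCC) of the consumer side of the layout
   described above: given the pc value saved in a backtrace frame, check the
   marker word at pc - 12 and, if its top 8 bits are set, recover the padded
   length and the start of the embedded name.  */
#if 0
static const char *
example_read_poked_name (const unsigned long *pc, unsigned long *len_out)
{
  unsigned long marker = pc[-3];            /* the word at pc - 12 */

  if ((marker & 0xff000000UL) != 0xff000000UL)
    return 0;                               /* no name was embedded */

  *len_out = marker & 0x00ffffffUL;         /* padded length of the name */

  /* The NUL-terminated, word-aligned name ends right before the marker.  */
  return (const char *) (pc - 3) - *len_out;
}
#endif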
9650 /* Place some comments into the assembler stream
9651 describing the current function. */
9653 arm_output_function_prologue (FILE *f
, HOST_WIDE_INT frame_size
)
9655 unsigned long func_type
;
9659 thumb_output_function_prologue (f
, frame_size
);
9664 gcc_assert (!arm_ccfsm_state
&& !arm_target_insn
);
9666 func_type
= arm_current_func_type ();
9668 switch ((int) ARM_FUNC_TYPE (func_type
))
9673 case ARM_FT_INTERWORKED
:
9674 asm_fprintf (f
, "\t%@ Function supports interworking.\n");
9677 asm_fprintf (f
, "\t%@ Interrupt Service Routine.\n");
9680 asm_fprintf (f
, "\t%@ Fast Interrupt Service Routine.\n");
9682 case ARM_FT_EXCEPTION
:
9683 asm_fprintf (f
, "\t%@ ARM Exception Handler.\n");
9687 if (IS_NAKED (func_type
))
9688 asm_fprintf (f
, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
9690 if (IS_VOLATILE (func_type
))
9691 asm_fprintf (f
, "\t%@ Volatile: function does not return.\n");
9693 if (IS_NESTED (func_type
))
9694 asm_fprintf (f
, "\t%@ Nested: function declared inside another function.\n");
9696 asm_fprintf (f
, "\t%@ args = %d, pretend = %d, frame = %wd\n",
9697 current_function_args_size
,
9698 current_function_pretend_args_size
, frame_size
);
9700 asm_fprintf (f
, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
9701 frame_pointer_needed
,
9702 cfun
->machine
->uses_anonymous_args
);
9704 if (cfun
->machine
->lr_save_eliminated
)
9705 asm_fprintf (f
, "\t%@ link register save eliminated.\n");
9707 if (current_function_calls_eh_return
)
9708 asm_fprintf (f
, "\t@ Calls __builtin_eh_return.\n");
9710 #ifdef AOF_ASSEMBLER
9712 asm_fprintf (f
, "\tmov\t%r, %r\n", IP_REGNUM
, PIC_OFFSET_TABLE_REGNUM
);
9715 return_used_this_function
= 0;
9719 arm_output_epilogue (rtx sibling
)
9722 unsigned long saved_regs_mask
;
9723 unsigned long func_type
;
9724 /* Floats_offset is the offset from the "virtual" frame. In an APCS
9725 frame that is $fp + 4 for a non-variadic function. */
9726 int floats_offset
= 0;
9728 FILE * f
= asm_out_file
;
9729 unsigned int lrm_count
= 0;
9730 int really_return
= (sibling
== NULL
);
9732 arm_stack_offsets
*offsets
;
9734 /* If we have already generated the return instruction
9735 then it is futile to generate anything else. */
9736 if (use_return_insn (FALSE
, sibling
) && return_used_this_function
)
9739 func_type
= arm_current_func_type ();
9741 if (IS_NAKED (func_type
))
9742 /* Naked functions don't have epilogues. */
9745 if (IS_VOLATILE (func_type
) && TARGET_ABORT_NORETURN
)
9749 /* A volatile function should never return. Call abort. */
9750 op
= gen_rtx_SYMBOL_REF (Pmode
, NEED_PLT_RELOC
? "abort(PLT)" : "abort");
9751 assemble_external_libcall (op
);
9752 output_asm_insn ("bl\t%a0", &op
);
9757 /* If we are throwing an exception, then we really must be doing a
9758 return, so we can't tail-call. */
9759 gcc_assert (!current_function_calls_eh_return
|| really_return
);
9761 offsets
= arm_get_frame_offsets ();
9762 saved_regs_mask
= arm_compute_save_reg_mask ();
9765 lrm_count
= bit_count (saved_regs_mask
);
9767 floats_offset
= offsets
->saved_args
;
9768 /* Compute how far away the floats will be. */
9769 for (reg
= 0; reg
<= LAST_ARM_REGNUM
; reg
++)
9770 if (saved_regs_mask
& (1 << reg
))
9773 if (frame_pointer_needed
)
9775 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
9776 int vfp_offset
= offsets
->frame
;
9778 if (arm_fpu_arch
== FPUTYPE_FPA_EMU2
)
9780 for (reg
= LAST_FPA_REGNUM
; reg
>= FIRST_FPA_REGNUM
; reg
--)
9781 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9783 floats_offset
+= 12;
9784 asm_fprintf (f
, "\tldfe\t%r, [%r, #-%d]\n",
9785 reg
, FP_REGNUM
, floats_offset
- vfp_offset
);
9790 start_reg
= LAST_FPA_REGNUM
;
9792 for (reg
= LAST_FPA_REGNUM
; reg
>= FIRST_FPA_REGNUM
; reg
--)
9794 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9796 floats_offset
+= 12;
9798 /* We can't unstack more than four registers at once. */
9799 if (start_reg
- reg
== 3)
9801 asm_fprintf (f
, "\tlfm\t%r, 4, [%r, #-%d]\n",
9802 reg
, FP_REGNUM
, floats_offset
- vfp_offset
);
9803 start_reg
= reg
- 1;
9808 if (reg
!= start_reg
)
9809 asm_fprintf (f
, "\tlfm\t%r, %d, [%r, #-%d]\n",
9810 reg
+ 1, start_reg
- reg
,
9811 FP_REGNUM
, floats_offset
- vfp_offset
);
9812 start_reg
= reg
- 1;
9816 /* Just in case the last register checked also needs unstacking. */
9817 if (reg
!= start_reg
)
9818 asm_fprintf (f
, "\tlfm\t%r, %d, [%r, #-%d]\n",
9819 reg
+ 1, start_reg
- reg
,
9820 FP_REGNUM
, floats_offset
- vfp_offset
);
9823 if (TARGET_HARD_FLOAT
&& TARGET_VFP
)
9827 /* The fldmx insn does not have base+offset addressing modes,
9828 so we use IP to hold the address. */
9829 saved_size
= arm_get_vfp_saved_size ();
9833 floats_offset
+= saved_size
;
9834 asm_fprintf (f
, "\tsub\t%r, %r, #%d\n", IP_REGNUM
,
9835 FP_REGNUM
, floats_offset
- vfp_offset
);
9837 start_reg
= FIRST_VFP_REGNUM
;
9838 for (reg
= FIRST_VFP_REGNUM
; reg
< LAST_VFP_REGNUM
; reg
+= 2)
9840 if ((!regs_ever_live
[reg
] || call_used_regs
[reg
])
9841 && (!regs_ever_live
[reg
+ 1] || call_used_regs
[reg
+ 1]))
9843 if (start_reg
!= reg
)
9844 arm_output_fldmx (f
, IP_REGNUM
,
9845 (start_reg
- FIRST_VFP_REGNUM
) / 2,
9846 (reg
- start_reg
) / 2);
9847 start_reg
= reg
+ 2;
9850 if (start_reg
!= reg
)
9851 arm_output_fldmx (f
, IP_REGNUM
,
9852 (start_reg
- FIRST_VFP_REGNUM
) / 2,
9853 (reg
- start_reg
) / 2);
9858 /* The frame pointer is guaranteed to be non-double-word aligned.
9859 This is because it is set to (old_stack_pointer - 4) and the
9860 old_stack_pointer was double word aligned. Thus the offset to
9861 the iWMMXt registers to be loaded must also be non-double-word
9862 sized, so that the resultant address *is* double-word aligned.
9863 We can ignore floats_offset since that was already included in
9864 the live_regs_mask. */
9865 lrm_count
+= (lrm_count
% 2 ? 2 : 1);
9867 for (reg
= LAST_IWMMXT_REGNUM
; reg
>= FIRST_IWMMXT_REGNUM
; reg
--)
9868 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9870 asm_fprintf (f
, "\twldrd\t%r, [%r, #-%d]\n",
9871 reg
, FP_REGNUM
, lrm_count
* 4);
9876 /* saved_regs_mask should contain the IP, which at the time of stack
9877 frame generation actually contains the old stack pointer. So a
9878 quick way to unwind the stack is just pop the IP register directly
9879 into the stack pointer. */
9880 gcc_assert (saved_regs_mask
& (1 << IP_REGNUM
));
9881 saved_regs_mask
&= ~ (1 << IP_REGNUM
);
9882 saved_regs_mask
|= (1 << SP_REGNUM
);
9884 /* There are two registers left in saved_regs_mask - LR and PC. We
9885 only need to restore the LR register (the return address), but to
9886 save time we can load it directly into the PC, unless we need a
9887 special function exit sequence, or we are not really returning. */
9889 && ARM_FUNC_TYPE (func_type
) == ARM_FT_NORMAL
9890 && !current_function_calls_eh_return
)
9891 /* Delete the LR from the register mask, so that the LR on
9892 the stack is loaded into the PC in the register mask. */
9893 saved_regs_mask
&= ~ (1 << LR_REGNUM
);
9895 saved_regs_mask
&= ~ (1 << PC_REGNUM
);
9897 /* We must use SP as the base register, because SP is one of the
9898 registers being restored. If an interrupt or page fault
9899 happens in the ldm instruction, the SP might or might not
9900 have been restored. That would be bad, as then SP will no
9901 longer indicate the safe area of stack, and we can get stack
9902 corruption. Using SP as the base register means that it will
9903 be reset correctly to the original value, should an interrupt
9904 occur. If the stack pointer already points at the right
9905 place, then omit the subtraction. */
9906 if (offsets
->outgoing_args
!= (1 + (int) bit_count (saved_regs_mask
))
9907 || current_function_calls_alloca
)
9908 asm_fprintf (f
, "\tsub\t%r, %r, #%d\n", SP_REGNUM
, FP_REGNUM
,
9909 4 * bit_count (saved_regs_mask
));
9910 print_multi_reg (f
, "ldmfd\t%r", SP_REGNUM
, saved_regs_mask
);
9912 if (IS_INTERRUPT (func_type
))
9913 /* Interrupt handlers will have pushed the
9914 IP onto the stack, so restore it now. */
9915 print_multi_reg (f
, "ldmfd\t%r!", SP_REGNUM
, 1 << IP_REGNUM
);
9919 /* Restore stack pointer if necessary. */
9920 if (offsets
->outgoing_args
!= offsets
->saved_regs
)
9922 operands
[0] = operands
[1] = stack_pointer_rtx
;
9923 operands
[2] = GEN_INT (offsets
->outgoing_args
- offsets
->saved_regs
);
9924 output_add_immediate (operands
);
9927 if (arm_fpu_arch
== FPUTYPE_FPA_EMU2
)
9929 for (reg
= FIRST_FPA_REGNUM
; reg
<= LAST_FPA_REGNUM
; reg
++)
9930 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9931 asm_fprintf (f
, "\tldfe\t%r, [%r], #12\n",
9936 start_reg
= FIRST_FPA_REGNUM
;
9938 for (reg
= FIRST_FPA_REGNUM
; reg
<= LAST_FPA_REGNUM
; reg
++)
9940 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9942 if (reg
- start_reg
== 3)
9944 asm_fprintf (f
, "\tlfmfd\t%r, 4, [%r]!\n",
9945 start_reg
, SP_REGNUM
);
9946 start_reg
= reg
+ 1;
9951 if (reg
!= start_reg
)
9952 asm_fprintf (f
, "\tlfmfd\t%r, %d, [%r]!\n",
9953 start_reg
, reg
- start_reg
,
9956 start_reg
= reg
+ 1;
9960 /* Just in case the last register checked also needs unstacking. */
9961 if (reg
!= start_reg
)
9962 asm_fprintf (f
, "\tlfmfd\t%r, %d, [%r]!\n",
9963 start_reg
, reg
- start_reg
, SP_REGNUM
);
9966 if (TARGET_HARD_FLOAT
&& TARGET_VFP
)
9968 start_reg
= FIRST_VFP_REGNUM
;
9969 for (reg
= FIRST_VFP_REGNUM
; reg
< LAST_VFP_REGNUM
; reg
+= 2)
9971 if ((!regs_ever_live
[reg
] || call_used_regs
[reg
])
9972 && (!regs_ever_live
[reg
+ 1] || call_used_regs
[reg
+ 1]))
9974 if (start_reg
!= reg
)
9975 arm_output_fldmx (f
, SP_REGNUM
,
9976 (start_reg
- FIRST_VFP_REGNUM
) / 2,
9977 (reg
- start_reg
) / 2);
9978 start_reg
= reg
+ 2;
9981 if (start_reg
!= reg
)
9982 arm_output_fldmx (f
, SP_REGNUM
,
9983 (start_reg
- FIRST_VFP_REGNUM
) / 2,
9984 (reg
- start_reg
) / 2);
9987 for (reg
= FIRST_IWMMXT_REGNUM
; reg
<= LAST_IWMMXT_REGNUM
; reg
++)
9988 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
9989 asm_fprintf (f
, "\twldrd\t%r, [%r], #8\n", reg
, SP_REGNUM
);
9991 /* If we can, restore the LR into the PC. */
9992 if (ARM_FUNC_TYPE (func_type
) == ARM_FT_NORMAL
9994 && current_function_pretend_args_size
== 0
9995 && saved_regs_mask
& (1 << LR_REGNUM
)
9996 && !current_function_calls_eh_return
)
9998 saved_regs_mask
&= ~ (1 << LR_REGNUM
);
9999 saved_regs_mask
|= (1 << PC_REGNUM
);
10002 /* Load the registers off the stack. If we only have one register
10003 to load use the LDR instruction - it is faster. */
10004 if (saved_regs_mask
== (1 << LR_REGNUM
))
10006 asm_fprintf (f
, "\tldr\t%r, [%r], #4\n", LR_REGNUM
, SP_REGNUM
);
10008 else if (saved_regs_mask
)
10010 if (saved_regs_mask
& (1 << SP_REGNUM
))
10011 /* Note - write back to the stack register is not enabled
10012 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10013 in the list of registers and if we add writeback the
10014 instruction becomes UNPREDICTABLE. */
10015 print_multi_reg (f
, "ldmfd\t%r", SP_REGNUM
, saved_regs_mask
);
10017 print_multi_reg (f
, "ldmfd\t%r!", SP_REGNUM
, saved_regs_mask
);
10020 if (current_function_pretend_args_size
)
10022 /* Unwind the pre-pushed regs. */
10023 operands
[0] = operands
[1] = stack_pointer_rtx
;
10024 operands
[2] = GEN_INT (current_function_pretend_args_size
);
10025 output_add_immediate (operands
);
10029 /* We may have already restored PC directly from the stack. */
10030 if (!really_return
|| saved_regs_mask
& (1 << PC_REGNUM
))
10033 /* Stack adjustment for exception handler. */
10034 if (current_function_calls_eh_return
)
10035 asm_fprintf (f
, "\tadd\t%r, %r, %r\n", SP_REGNUM
, SP_REGNUM
,
10036 ARM_EH_STACKADJ_REGNUM
);
10038 /* Generate the return instruction. */
10039 switch ((int) ARM_FUNC_TYPE (func_type
))
10043 asm_fprintf (f
, "\tsubs\t%r, %r, #4\n", PC_REGNUM
, LR_REGNUM
);
10046 case ARM_FT_EXCEPTION
:
10047 asm_fprintf (f
, "\tmovs\t%r, %r\n", PC_REGNUM
, LR_REGNUM
);
10050 case ARM_FT_INTERWORKED
:
10051 asm_fprintf (f
, "\tbx\t%r\n", LR_REGNUM
);
10055 if (arm_arch5
|| arm_arch4t
)
10056 asm_fprintf (f
, "\tbx\t%r\n", LR_REGNUM
);
10058 asm_fprintf (f
, "\tmov\t%r, %r\n", PC_REGNUM
, LR_REGNUM
);
10066 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED
,
10067 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED
)
10069 arm_stack_offsets
*offsets
;
10075 /* Emit any call-via-reg trampolines that are needed for v4t support
10076 of call_reg and call_value_reg type insns. */
10077 for (regno
= 0; regno
< LR_REGNUM
; regno
++)
10079 rtx label
= cfun
->machine
->call_via
[regno
];
10083 function_section (current_function_decl
);
10084 targetm
.asm_out
.internal_label (asm_out_file
, "L",
10085 CODE_LABEL_NUMBER (label
));
10086 asm_fprintf (asm_out_file
, "\tbx\t%r\n", regno
);
10090 /* ??? Probably not safe to set this here, since it assumes that a
10091 function will be emitted as assembly immediately after we generate
10092 RTL for it. This does not happen for inline functions. */
10093 return_used_this_function
= 0;
10097 /* We need to take into account any stack-frame rounding. */
10098 offsets
= arm_get_frame_offsets ();
10100 gcc_assert (!use_return_insn (FALSE
, NULL
)
10101 || !return_used_this_function
10102 || offsets
->saved_regs
== offsets
->outgoing_args
10103 || frame_pointer_needed
);
10105 /* Reset the ARM-specific per-function variables. */
10106 after_arm_reorg
= 0;
10110 /* Generate and emit an insn that we will recognize as a push_multi.
10111 Unfortunately, since this insn does not reflect very well the actual
10112 semantics of the operation, we need to annotate the insn for the benefit
10113 of DWARF2 frame unwind information. */
10115 emit_multi_reg_push (unsigned long mask
)
10118 int num_dwarf_regs
;
10122 int dwarf_par_index
;
10125 for (i
= 0; i
<= LAST_ARM_REGNUM
; i
++)
10126 if (mask
& (1 << i
))
10129 gcc_assert (num_regs
&& num_regs
<= 16);
10131 /* We don't record the PC in the dwarf frame information. */
10132 num_dwarf_regs
= num_regs
;
10133 if (mask
& (1 << PC_REGNUM
))
10136 /* For the body of the insn we are going to generate an UNSPEC in
10137 parallel with several USEs. This allows the insn to be recognized
10138 by the push_multi pattern in the arm.md file. The insn looks
   something like this:

       (parallel [
           (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
                (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
           (use (reg:SI 11 fp))
           (use (reg:SI 12 ip))
           (use (reg:SI 14 lr))
           (use (reg:SI 15 pc))
        ])
10150 For the frame note however, we try to be more explicit and actually
10151 show each register being stored into the stack frame, plus a (single)
10152 decrement of the stack pointer. We do it this way in order to be
10153 friendly to the stack unwinding code, which only wants to see a single
10154 stack decrement per instruction. The RTL we generate for the note looks
   something like this:

      (sequence [
           (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
           (set (mem:SI (reg:SI sp)) (reg:SI r4))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
           (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
        ])
10165 This sequence is used both by the code to support stack unwinding for
10166 exceptions handlers and the code to generate dwarf2 frame debugging. */
10168 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (num_regs
));
10169 dwarf
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (num_dwarf_regs
+ 1));
10170 dwarf_par_index
= 1;
10172 for (i
= 0; i
<= LAST_ARM_REGNUM
; i
++)
10174 if (mask
& (1 << i
))
10176 reg
= gen_rtx_REG (SImode
, i
);
10178 XVECEXP (par
, 0, 0)
10179 = gen_rtx_SET (VOIDmode
,
10180 gen_frame_mem (BLKmode
,
10181 gen_rtx_PRE_DEC (BLKmode
,
10182 stack_pointer_rtx
)),
10183 gen_rtx_UNSPEC (BLKmode
,
10184 gen_rtvec (1, reg
),
10185 UNSPEC_PUSH_MULT
));
10187 if (i
!= PC_REGNUM
)
10189 tmp
= gen_rtx_SET (VOIDmode
,
10190 gen_frame_mem (SImode
, stack_pointer_rtx
),
10192 RTX_FRAME_RELATED_P (tmp
) = 1;
10193 XVECEXP (dwarf
, 0, dwarf_par_index
) = tmp
;
10201 for (j
= 1, i
++; j
< num_regs
; i
++)
10203 if (mask
& (1 << i
))
10205 reg
= gen_rtx_REG (SImode
, i
);
10207 XVECEXP (par
, 0, j
) = gen_rtx_USE (VOIDmode
, reg
);
10209 if (i
!= PC_REGNUM
)
10212 = gen_rtx_SET (VOIDmode
,
10213 gen_frame_mem (SImode
,
10214 plus_constant (stack_pointer_rtx
,
10217 RTX_FRAME_RELATED_P (tmp
) = 1;
10218 XVECEXP (dwarf
, 0, dwarf_par_index
++) = tmp
;
10225 par
= emit_insn (par
);
10227 tmp
= gen_rtx_SET (SImode
,
10229 gen_rtx_PLUS (SImode
,
10231 GEN_INT (-4 * num_regs
)));
10232 RTX_FRAME_RELATED_P (tmp
) = 1;
10233 XVECEXP (dwarf
, 0, 0) = tmp
;
10235 REG_NOTES (par
) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, dwarf
,
/* Calculate the size of the return value that is passed in registers.  */
static int
arm_size_return_regs (void)
{
  enum machine_mode mode;

  if (current_function_return_rtx != 0)
    mode = GET_MODE (current_function_return_rtx);
  else
    mode = DECL_MODE (DECL_RESULT (current_function_decl));

  return GET_MODE_SIZE (mode);
}
10255 emit_sfm (int base_reg
, int count
)
10262 par
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (count
));
10263 dwarf
= gen_rtx_SEQUENCE (VOIDmode
, rtvec_alloc (count
+ 1));
10265 reg
= gen_rtx_REG (XFmode
, base_reg
++);
10267 XVECEXP (par
, 0, 0)
10268 = gen_rtx_SET (VOIDmode
,
10269 gen_frame_mem (BLKmode
,
10270 gen_rtx_PRE_DEC (BLKmode
,
10271 stack_pointer_rtx
)),
10272 gen_rtx_UNSPEC (BLKmode
,
10273 gen_rtvec (1, reg
),
10274 UNSPEC_PUSH_MULT
));
10275 tmp
= gen_rtx_SET (VOIDmode
,
10276 gen_frame_mem (XFmode
, stack_pointer_rtx
), reg
);
10277 RTX_FRAME_RELATED_P (tmp
) = 1;
10278 XVECEXP (dwarf
, 0, 1) = tmp
;
10280 for (i
= 1; i
< count
; i
++)
10282 reg
= gen_rtx_REG (XFmode
, base_reg
++);
10283 XVECEXP (par
, 0, i
) = gen_rtx_USE (VOIDmode
, reg
);
10285 tmp
= gen_rtx_SET (VOIDmode
,
10286 gen_frame_mem (XFmode
,
10287 plus_constant (stack_pointer_rtx
,
10290 RTX_FRAME_RELATED_P (tmp
) = 1;
10291 XVECEXP (dwarf
, 0, i
+ 1) = tmp
;
10294 tmp
= gen_rtx_SET (VOIDmode
,
10296 gen_rtx_PLUS (SImode
,
10298 GEN_INT (-12 * count
)));
10299 RTX_FRAME_RELATED_P (tmp
) = 1;
10300 XVECEXP (dwarf
, 0, 0) = tmp
;
10302 par
= emit_insn (par
);
10303 REG_NOTES (par
) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, dwarf
,
/* Return true if the current function needs to save/restore LR.  */
static bool
thumb_force_lr_save (void)
{
  return !cfun->machine->lr_save_eliminated
         && (!leaf_function_p ()
             || thumb_far_jump_used_p ()
             || regs_ever_live[LR_REGNUM]);
}
10321 /* Compute the distance from register FROM to register TO.
10322 These can be the arg pointer (26), the soft frame pointer (25),
10323 the stack pointer (13) or the hard frame pointer (11).
10324 In thumb mode r7 is used as the soft frame pointer, if needed.
   Typical stack layout looks like this:

       old stack pointer -> |    |
                             ----
                            |    | \
                            |    |   saved arguments for
                            |    |   vararg functions
                            |    | /
                              --
   hard FP & arg pointer -> |    | \
                            |    |   stack
                            |    |   frame
                            |    | /
                              --
                            |    | \
                            |    |   call saved
                            |    |   registers
      soft frame pointer -> |    | /
                              --
                            |    | \
                            |    |   local
                            |    |   variables
     locals base pointer -> |    | /
                              --
                            |    | \
                            |    |   outgoing
                            |    |   arguments
   current stack pointer -> |    | /
                              --
   For a given function some or all of these stack components
   may not be needed, giving rise to the possibility of
   eliminating some of the registers.

   The values returned by this function must reflect the behavior
   of arm_expand_prologue() and arm_compute_save_reg_mask().

   The sign of the number returned reflects the direction of stack
   growth, so the values are positive for all eliminations except
   from the soft frame pointer to the hard frame pointer.

   SFP may point just inside the local variables block to ensure correct
   alignment.  */
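/* A worked standalone sketch (hypothetical numbers, not GCC code) of the
   frame layout above, expressed as byte offsets from the incoming stack
   pointer the way arm_get_frame_offsets below fills in its structure.  It
   assumes an APCS frame is being built and ignores the interworking slot.  */
#if 0
struct example_offsets
{
  int saved_args;       /* bottom of pre-pushed (vararg) arguments */
  int frame;            /* APCS frame, just above the saved registers */
  int saved_regs;       /* end of the call-saved register block */
  int soft_frame;       /* soft frame pointer, kept doubleword aligned */
  int locals_base;      /* start of local variables */
  int outgoing_args;    /* current stack pointer, kept doubleword aligned */
};

static struct example_offsets
example_layout (int pretend_bytes, int saved_reg_bytes,
                int locals_bytes, int outgoing_bytes)
{
  struct example_offsets o;

  o.saved_args = pretend_bytes;
  o.frame = o.saved_args + 4;                   /* frame pointer assumed */
  o.saved_regs = o.saved_args + saved_reg_bytes;
  o.soft_frame = (o.saved_regs + 7) & ~7;       /* 8-byte align the SFP */
  o.locals_base = o.soft_frame + locals_bytes;
  o.outgoing_args = (o.locals_base + outgoing_bytes + 7) & ~7;

  return o;
}
#endif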
10370 /* Calculate stack offsets. These are used to calculate register elimination
10371 offsets and in prologue/epilogue code. */
10373 static arm_stack_offsets
*
10374 arm_get_frame_offsets (void)
10376 struct arm_stack_offsets
*offsets
;
10377 unsigned long func_type
;
10380 HOST_WIDE_INT frame_size
;
10382 offsets
= &cfun
->machine
->stack_offsets
;
/* We need to know if we are a leaf function.  Unfortunately, it
   is possible to be called after start_sequence has been called,
   which causes get_insns to return the insns for the sequence,
   not the function, which will cause leaf_function_p to return
   the incorrect result.

   We work around this by caching the computed offsets; we only need
   to know about leaf functions once reload has completed, and the
   frame size cannot be changed after that time, so we can safely
   use the cached value.  */
10394 if (reload_completed
)
/* Initially this is the size of the local variables.  It will be translated
   into an offset once we have determined the size of preceding data.  */
10399 frame_size
= ROUND_UP_WORD (get_frame_size ());
10401 leaf
= leaf_function_p ();
10403 /* Space for variadic functions. */
10404 offsets
->saved_args
= current_function_pretend_args_size
;
10406 offsets
->frame
= offsets
->saved_args
+ (frame_pointer_needed
? 4 : 0);
10410 unsigned int regno
;
10412 saved
= bit_count (arm_compute_save_reg_mask ()) * 4;
10414 /* We know that SP will be doubleword aligned on entry, and we must
10415 preserve that condition at any subroutine call. We also require the
10416 soft frame pointer to be doubleword aligned. */
10418 if (TARGET_REALLY_IWMMXT
)
10420 /* Check for the call-saved iWMMXt registers. */
10421 for (regno
= FIRST_IWMMXT_REGNUM
;
10422 regno
<= LAST_IWMMXT_REGNUM
;
10424 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
10428 func_type
= arm_current_func_type ();
10429 if (! IS_VOLATILE (func_type
))
10431 /* Space for saved FPA registers. */
10432 for (regno
= FIRST_FPA_REGNUM
; regno
<= LAST_FPA_REGNUM
; regno
++)
10433 if (regs_ever_live
[regno
] && ! call_used_regs
[regno
])
10436 /* Space for saved VFP registers. */
10437 if (TARGET_HARD_FLOAT
&& TARGET_VFP
)
10438 saved
+= arm_get_vfp_saved_size ();
10441 else /* TARGET_THUMB */
10443 saved
= bit_count (thumb_compute_save_reg_mask ()) * 4;
10444 if (TARGET_BACKTRACE
)
10448 /* Saved registers include the stack frame. */
10449 offsets
->saved_regs
= offsets
->saved_args
+ saved
;
10450 offsets
->soft_frame
= offsets
->saved_regs
+ CALLER_INTERWORKING_SLOT_SIZE
;
/* A leaf function does not need any stack alignment if it has nothing
   on the stack.  */
10453 if (leaf
&& frame_size
== 0)
10455 offsets
->outgoing_args
= offsets
->soft_frame
;
10456 offsets
->locals_base
= offsets
->soft_frame
;
10460 /* Ensure SFP has the correct alignment. */
10461 if (ARM_DOUBLEWORD_ALIGN
10462 && (offsets
->soft_frame
& 7))
10463 offsets
->soft_frame
+= 4;
10465 offsets
->locals_base
= offsets
->soft_frame
+ frame_size
;
10466 offsets
->outgoing_args
= (offsets
->locals_base
10467 + current_function_outgoing_args_size
);
10469 if (ARM_DOUBLEWORD_ALIGN
)
10471 /* Ensure SP remains doubleword aligned. */
10472 if (offsets
->outgoing_args
& 7)
10473 offsets
->outgoing_args
+= 4;
10474 gcc_assert (!(offsets
->outgoing_args
& 7));
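/* A standalone sketch (not GCC code): the alignment fix-ups above rely on
   every offset already being a multiple of 4, so bumping by a single word
   is always enough to restore doubleword (8-byte) alignment.  */
#if 0
static int
example_align8 (int word_aligned_offset)
{
  if (word_aligned_offset & 7)
    word_aligned_offset += 4;

  return word_aligned_offset;
}
#endif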
10481 /* Calculate the relative offsets for the different stack pointers. Positive
10482 offsets are in the direction of stack growth. */
10485 arm_compute_initial_elimination_offset (unsigned int from
, unsigned int to
)
10487 arm_stack_offsets
*offsets
;
10489 offsets
= arm_get_frame_offsets ();
10491 /* OK, now we have enough information to compute the distances.
10492 There must be an entry in these switch tables for each pair
10493 of registers in ELIMINABLE_REGS, even if some of the entries
10494 seem to be redundant or useless. */
10497 case ARG_POINTER_REGNUM
:
10500 case THUMB_HARD_FRAME_POINTER_REGNUM
:
10503 case FRAME_POINTER_REGNUM
:
10504 /* This is the reverse of the soft frame pointer
10505 to hard frame pointer elimination below. */
10506 return offsets
->soft_frame
- offsets
->saved_args
;
10508 case ARM_HARD_FRAME_POINTER_REGNUM
:
10509 /* If there is no stack frame then the hard
10510 frame pointer and the arg pointer coincide. */
10511 if (offsets
->frame
== offsets
->saved_regs
)
10513 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
10514 return (frame_pointer_needed
10515 && cfun
->static_chain_decl
!= NULL
10516 && ! cfun
->machine
->uses_anonymous_args
) ? 4 : 0;
10518 case STACK_POINTER_REGNUM
:
10519 /* If nothing has been pushed on the stack at all
10520 then this will return -4. This *is* correct! */
10521 return offsets
->outgoing_args
- (offsets
->saved_args
+ 4);
10524 gcc_unreachable ();
10526 gcc_unreachable ();
10528 case FRAME_POINTER_REGNUM
:
10531 case THUMB_HARD_FRAME_POINTER_REGNUM
:
10534 case ARM_HARD_FRAME_POINTER_REGNUM
:
/* The hard frame pointer points to the top entry in the
   stack frame.  The soft frame pointer points to the bottom entry
   in the stack frame.  If there is no stack frame at all,
   then they are identical.  */
10540 return offsets
->frame
- offsets
->soft_frame
;
10542 case STACK_POINTER_REGNUM
:
10543 return offsets
->outgoing_args
- offsets
->soft_frame
;
10546 gcc_unreachable ();
10548 gcc_unreachable ();
10551 /* You cannot eliminate from the stack pointer.
10552 In theory you could eliminate from the hard frame
10553 pointer to the stack pointer, but this will never
10554 happen, since if a stack frame is not needed the
10555 hard frame pointer will never be used. */
10556 gcc_unreachable ();
10561 /* Generate the prologue instructions for entry into an ARM function. */
10563 arm_expand_prologue (void)
10569 unsigned long live_regs_mask
;
10570 unsigned long func_type
;
10572 int saved_pretend_args
= 0;
10573 int saved_regs
= 0;
10574 unsigned HOST_WIDE_INT args_to_push
;
10575 arm_stack_offsets
*offsets
;
10577 func_type
= arm_current_func_type ();
10579 /* Naked functions don't have prologues. */
10580 if (IS_NAKED (func_type
))
10583 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
10584 args_to_push
= current_function_pretend_args_size
;
/* Compute which registers we will have to save onto the stack.  */
10587 live_regs_mask
= arm_compute_save_reg_mask ();
10589 ip_rtx
= gen_rtx_REG (SImode
, IP_REGNUM
);
10591 if (frame_pointer_needed
)
10593 if (IS_INTERRUPT (func_type
))
/* Interrupt functions must not corrupt any registers.
   Creating a frame pointer, however, corrupts the IP
   register, so we must push it first.  */
10598 insn
= emit_multi_reg_push (1 << IP_REGNUM
);
10600 /* Do not set RTX_FRAME_RELATED_P on this insn.
10601 The dwarf stack unwinding code only wants to see one
10602 stack decrement per function, and this is not it. If
10603 this instruction is labeled as being part of the frame
10604 creation sequence then dwarf2out_frame_debug_expr will
10605 die when it encounters the assignment of IP to FP
10606 later on, since the use of SP here establishes SP as
10607 the CFA register and not IP.
10609 Anyway this instruction is not really part of the stack
10610 frame creation although it is part of the prologue. */
10612 else if (IS_NESTED (func_type
))
/* The static chain register is the same as the IP register
   used as a scratch register during stack frame creation.
   To get around this we need to find somewhere to store IP
   whilst the frame is being created.  We try the following
   places in order:

     1. The last argument register.
     2. A slot on the stack above the frame.  (This only
        works if the function is not a varargs function).
     3. Register r3, after pushing the argument registers
        onto the stack.

10626 Note - we only need to tell the dwarf2 backend about the SP
10627 adjustment in the second variant; the static chain register
10628 doesn't need to be unwound, as it doesn't contain a value
10629 inherited from the caller. */
10631 if (regs_ever_live
[3] == 0)
10633 insn
= gen_rtx_REG (SImode
, 3);
10634 insn
= gen_rtx_SET (SImode
, insn
, ip_rtx
);
10635 insn
= emit_insn (insn
);
10637 else if (args_to_push
== 0)
10640 insn
= gen_rtx_PRE_DEC (SImode
, stack_pointer_rtx
);
10641 insn
= gen_frame_mem (SImode
, insn
);
10642 insn
= gen_rtx_SET (VOIDmode
, insn
, ip_rtx
);
10643 insn
= emit_insn (insn
);
10647 /* Just tell the dwarf backend that we adjusted SP. */
10648 dwarf
= gen_rtx_SET (VOIDmode
, stack_pointer_rtx
,
10649 gen_rtx_PLUS (SImode
, stack_pointer_rtx
,
10650 GEN_INT (-fp_offset
)));
10651 RTX_FRAME_RELATED_P (insn
) = 1;
10652 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
,
10653 dwarf
, REG_NOTES (insn
));
10657 /* Store the args on the stack. */
10658 if (cfun
->machine
->uses_anonymous_args
)
10659 insn
= emit_multi_reg_push
10660 ((0xf0 >> (args_to_push
/ 4)) & 0xf);
10663 (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
10664 GEN_INT (- args_to_push
)));
10666 RTX_FRAME_RELATED_P (insn
) = 1;
10668 saved_pretend_args
= 1;
10669 fp_offset
= args_to_push
;
10672 /* Now reuse r3 to preserve IP. */
10673 insn
= gen_rtx_REG (SImode
, 3);
10674 insn
= gen_rtx_SET (SImode
, insn
, ip_rtx
);
10675 (void) emit_insn (insn
);
10681 insn
= gen_rtx_PLUS (SImode
, stack_pointer_rtx
, GEN_INT (fp_offset
));
10682 insn
= gen_rtx_SET (SImode
, ip_rtx
, insn
);
10685 insn
= gen_movsi (ip_rtx
, stack_pointer_rtx
);
10687 insn
= emit_insn (insn
);
10688 RTX_FRAME_RELATED_P (insn
) = 1;
10693 /* Push the argument registers, or reserve space for them. */
10694 if (cfun
->machine
->uses_anonymous_args
)
10695 insn
= emit_multi_reg_push
10696 ((0xf0 >> (args_to_push
/ 4)) & 0xf);
10699 (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
10700 GEN_INT (- args_to_push
)));
10701 RTX_FRAME_RELATED_P (insn
) = 1;
10704 /* If this is an interrupt service routine, and the link register
10705 is going to be pushed, and we are not creating a stack frame,
10706 (which would involve an extra push of IP and a pop in the epilogue)
10707 subtracting four from LR now will mean that the function return
10708 can be done with a single instruction. */
10709 if ((func_type
== ARM_FT_ISR
|| func_type
== ARM_FT_FIQ
)
10710 && (live_regs_mask
& (1 << LR_REGNUM
)) != 0
10711 && ! frame_pointer_needed
)
10712 emit_insn (gen_rtx_SET (SImode
,
10713 gen_rtx_REG (SImode
, LR_REGNUM
),
10714 gen_rtx_PLUS (SImode
,
10715 gen_rtx_REG (SImode
, LR_REGNUM
),
10718 if (live_regs_mask
)
10720 insn
= emit_multi_reg_push (live_regs_mask
);
10721 saved_regs
+= bit_count (live_regs_mask
) * 4;
10722 RTX_FRAME_RELATED_P (insn
) = 1;
10726 for (reg
= LAST_IWMMXT_REGNUM
; reg
>= FIRST_IWMMXT_REGNUM
; reg
--)
10727 if (regs_ever_live
[reg
] && ! call_used_regs
[reg
])
10729 insn
= gen_rtx_PRE_DEC (V2SImode
, stack_pointer_rtx
);
10730 insn
= gen_frame_mem (V2SImode
, insn
);
10731 insn
= emit_insn (gen_rtx_SET (VOIDmode
, insn
,
10732 gen_rtx_REG (V2SImode
, reg
)));
10733 RTX_FRAME_RELATED_P (insn
) = 1;
10737 if (! IS_VOLATILE (func_type
))
/* Save any floating point call-saved registers used by this
   function.  */
10743 if (arm_fpu_arch
== FPUTYPE_FPA_EMU2
)
10745 for (reg
= LAST_FPA_REGNUM
; reg
>= FIRST_FPA_REGNUM
; reg
--)
10746 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
10748 insn
= gen_rtx_PRE_DEC (XFmode
, stack_pointer_rtx
);
10749 insn
= gen_frame_mem (XFmode
, insn
);
10750 insn
= emit_insn (gen_rtx_SET (VOIDmode
, insn
,
10751 gen_rtx_REG (XFmode
, reg
)));
10752 RTX_FRAME_RELATED_P (insn
) = 1;
10758 start_reg
= LAST_FPA_REGNUM
;
10760 for (reg
= LAST_FPA_REGNUM
; reg
>= FIRST_FPA_REGNUM
; reg
--)
10762 if (regs_ever_live
[reg
] && !call_used_regs
[reg
])
10764 if (start_reg
- reg
== 3)
10766 insn
= emit_sfm (reg
, 4);
10767 RTX_FRAME_RELATED_P (insn
) = 1;
10769 start_reg
= reg
- 1;
10774 if (start_reg
!= reg
)
10776 insn
= emit_sfm (reg
+ 1, start_reg
- reg
);
10777 RTX_FRAME_RELATED_P (insn
) = 1;
10778 saved_regs
+= (start_reg
- reg
) * 12;
10780 start_reg
= reg
- 1;
10784 if (start_reg
!= reg
)
10786 insn
= emit_sfm (reg
+ 1, start_reg
- reg
);
10787 saved_regs
+= (start_reg
- reg
) * 12;
10788 RTX_FRAME_RELATED_P (insn
) = 1;
10791 if (TARGET_HARD_FLOAT
&& TARGET_VFP
)
10793 start_reg
= FIRST_VFP_REGNUM
;
10795 for (reg
= FIRST_VFP_REGNUM
; reg
< LAST_VFP_REGNUM
; reg
+= 2)
10797 if ((!regs_ever_live
[reg
] || call_used_regs
[reg
])
10798 && (!regs_ever_live
[reg
+ 1] || call_used_regs
[reg
+ 1]))
10800 if (start_reg
!= reg
)
10801 saved_regs
+= vfp_emit_fstmx (start_reg
,
10802 (reg
- start_reg
) / 2);
10803 start_reg
= reg
+ 2;
10806 if (start_reg
!= reg
)
10807 saved_regs
+= vfp_emit_fstmx (start_reg
,
10808 (reg
- start_reg
) / 2);
10812 if (frame_pointer_needed
)
10814 /* Create the new frame pointer. */
10815 insn
= GEN_INT (-(4 + args_to_push
+ fp_offset
));
10816 insn
= emit_insn (gen_addsi3 (hard_frame_pointer_rtx
, ip_rtx
, insn
));
10817 RTX_FRAME_RELATED_P (insn
) = 1;
10819 if (IS_NESTED (func_type
))
10821 /* Recover the static chain register. */
10822 if (regs_ever_live
[3] == 0
10823 || saved_pretend_args
)
10824 insn
= gen_rtx_REG (SImode
, 3);
10825 else /* if (current_function_pretend_args_size == 0) */
10827 insn
= gen_rtx_PLUS (SImode
, hard_frame_pointer_rtx
,
10829 insn
= gen_frame_mem (SImode
, insn
);
10832 emit_insn (gen_rtx_SET (SImode
, ip_rtx
, insn
));
10833 /* Add a USE to stop propagate_one_insn() from barfing. */
10834 emit_insn (gen_prologue_use (ip_rtx
));
10838 offsets
= arm_get_frame_offsets ();
10839 if (offsets
->outgoing_args
!= offsets
->saved_args
+ saved_regs
)
10841 /* This add can produce multiple insns for a large constant, so we
10842 need to get tricky. */
10843 rtx last
= get_last_insn ();
10845 amount
= GEN_INT (offsets
->saved_args
+ saved_regs
10846 - offsets
->outgoing_args
);
10848 insn
= emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
10852 last
= last
? NEXT_INSN (last
) : get_insns ();
10853 RTX_FRAME_RELATED_P (last
) = 1;
10855 while (last
!= insn
);
10857 /* If the frame pointer is needed, emit a special barrier that
10858 will prevent the scheduler from moving stores to the frame
10859 before the stack adjustment. */
10860 if (frame_pointer_needed
)
10861 insn
= emit_insn (gen_stack_tie (stack_pointer_rtx
,
10862 hard_frame_pointer_rtx
));
10867 arm_load_pic_register (0UL);
10869 /* If we are profiling, make sure no instructions are scheduled before
10870 the call to mcount. Similarly if the user has requested no
10871 scheduling in the prolog. Similarly if we want non-call exceptions
10872 using the EABI unwinder, to prevent faulting instructions from being
10873 swapped with a stack adjustment. */
10874 if (current_function_profile
|| !TARGET_SCHED_PROLOG
10875 || (ARM_EABI_UNWIND_TABLES
&& flag_non_call_exceptions
))
10876 emit_insn (gen_blockage ());
10878 /* If the link register is being kept alive, with the return address in it,
10879 then make sure that it does not get reused by the ce2 pass. */
10880 if ((live_regs_mask
& (1 << LR_REGNUM
)) == 0)
10882 emit_insn (gen_prologue_use (gen_rtx_REG (SImode
, LR_REGNUM
)));
10883 cfun
->machine
->lr_save_eliminated
= 1;
10887 /* If CODE is 'd', then the X is a condition operand and the instruction
10888 should only be executed if the condition is true.
10889 if CODE is 'D', then the X is a condition operand and the instruction
10890 should only be executed if the condition is false: however, if the mode
10891 of the comparison is CCFPEmode, then always execute the instruction -- we
10892 do this because in these circumstances !GE does not necessarily imply LT;
10893 in these cases the instruction pattern will take care to make sure that
10894 an instruction containing %d will follow, thereby undoing the effects of
10895 doing this instruction unconditionally.
   If CODE is 'N' then X is a floating point operand that must be negated
   before output.
   If CODE is 'B' then output a bitwise inverted value of X (a const int).
   If X is a REG and CODE is `M', output a ldm/stm style multi-reg.  */
10901 arm_print_operand (FILE *stream
, rtx x
, int code
)
10906 fputs (ASM_COMMENT_START
, stream
);
10910 fputs (user_label_prefix
, stream
);
10914 fputs (REGISTER_PREFIX
, stream
);
10918 if (arm_ccfsm_state
== 3 || arm_ccfsm_state
== 4)
10922 output_operand_lossage ("predicated Thumb instruction");
10925 if (current_insn_predicate
!= NULL
)
10927 output_operand_lossage
10928 ("predicated instruction in conditional sequence");
10932 fputs (arm_condition_codes
[arm_current_cc
], stream
);
10934 else if (current_insn_predicate
)
10936 enum arm_cond_code code
;
10940 output_operand_lossage ("predicated Thumb instruction");
10944 code
= get_arm_condition_code (current_insn_predicate
);
10945 fputs (arm_condition_codes
[code
], stream
);
10952 REAL_VALUE_FROM_CONST_DOUBLE (r
, x
);
10953 r
= REAL_VALUE_NEGATE (r
);
10954 fprintf (stream
, "%s", fp_const_from_val (&r
));
10959 if (GET_CODE (x
) == CONST_INT
)
10962 val
= ARM_SIGN_EXTEND (~INTVAL (x
));
10963 fprintf (stream
, HOST_WIDE_INT_PRINT_DEC
, val
);
10967 putc ('~', stream
);
10968 output_addr_const (stream
, x
);
10973 fprintf (stream
, "%s", arithmetic_instr (x
, 1));
10976 /* Truncate Cirrus shift counts. */
10978 if (GET_CODE (x
) == CONST_INT
)
10980 fprintf (stream
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 0x3f);
10983 arm_print_operand (stream
, x
, 0);
10987 fprintf (stream
, "%s", arithmetic_instr (x
, 0));
10993 const char * shift
= shift_op (x
, &val
);
10997 fprintf (stream
, ", %s ", shift_op (x
, &val
));
10999 arm_print_operand (stream
, XEXP (x
, 1), 0);
11001 fprintf (stream
, "#" HOST_WIDE_INT_PRINT_DEC
, val
);
11006 /* An explanation of the 'Q', 'R' and 'H' register operands:
11008 In a pair of registers containing a DI or DF value the 'Q'
11009 operand returns the register number of the register containing
11010 the least significant part of the value. The 'R' operand returns
11011 the register number of the register containing the most
11012 significant part of the value.
11014 The 'H' operand returns the higher of the two register numbers.
11015 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11016 same as the 'Q' operand, since the most significant part of the
11017 value is held in the lower number register. The reverse is true
11018 on systems where WORDS_BIG_ENDIAN is false.
11020 The purpose of these operands is to distinguish between cases
11021 where the endian-ness of the values is important (for example
11022 when they are added together), and cases where the endian-ness
11023 is irrelevant, but the order of register operations is important.
11024 For example when loading a value from memory into a register
11025 pair, the endian-ness does not matter. Provided that the value
11026 from the lower memory address is put into the lower numbered
11027 register, and the value from the higher address is put into the
11028 higher numbered register, the load will work regardless of whether
11029 the value being loaded is big-wordian or little-wordian. The
11030 order of the two register loads can matter however, if the address
11031 of the memory location is actually held in one of the registers
11032 being overwritten by the load. */
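/* A standalone sketch (not GCC code) of the mapping explained above for a
   64-bit value held in the register pair (REGNO, REGNO + 1): 'Q' names the
   register holding the least significant half, 'R' the most significant
   half (both depend on WORDS_BIG_ENDIAN), while 'H' is always the
   higher-numbered register of the pair.  */
#if 0
static int
example_operand_regno (char code, int regno, int words_big_endian)
{
  switch (code)
    {
    case 'Q':                   /* least significant half */
      return regno + (words_big_endian ? 1 : 0);
    case 'R':                   /* most significant half */
      return regno + (words_big_endian ? 0 : 1);
    case 'H':                   /* always the higher-numbered register */
      return regno + 1;
    default:
      return -1;
    }
}
#endif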
11034 if (GET_CODE (x
) != REG
|| REGNO (x
) > LAST_ARM_REGNUM
)
11036 output_operand_lossage ("invalid operand for code '%c'", code
);
11040 asm_fprintf (stream
, "%r", REGNO (x
) + (WORDS_BIG_ENDIAN
? 1 : 0));
11044 if (GET_CODE (x
) != REG
|| REGNO (x
) > LAST_ARM_REGNUM
)
11046 output_operand_lossage ("invalid operand for code '%c'", code
);
11050 asm_fprintf (stream
, "%r", REGNO (x
) + (WORDS_BIG_ENDIAN
? 0 : 1));
11054 if (GET_CODE (x
) != REG
|| REGNO (x
) > LAST_ARM_REGNUM
)
11056 output_operand_lossage ("invalid operand for code '%c'", code
);
11060 asm_fprintf (stream
, "%r", REGNO (x
) + 1);
11064 asm_fprintf (stream
, "%r",
11065 GET_CODE (XEXP (x
, 0)) == REG
11066 ? REGNO (XEXP (x
, 0)) : REGNO (XEXP (XEXP (x
, 0), 0)));
11070 asm_fprintf (stream
, "{%r-%r}",
11072 REGNO (x
) + ARM_NUM_REGS (GET_MODE (x
)) - 1);
11076 /* CONST_TRUE_RTX means always -- that's the default. */
11077 if (x
== const_true_rtx
)
11080 if (!COMPARISON_P (x
))
11082 output_operand_lossage ("invalid operand for code '%c'", code
);
11086 fputs (arm_condition_codes
[get_arm_condition_code (x
)],
11091 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11092 want to do that. */
11093 if (x
== const_true_rtx
)
output_operand_lossage ("instruction never executed");
11098 if (!COMPARISON_P (x
))
11100 output_operand_lossage ("invalid operand for code '%c'", code
);
11104 fputs (arm_condition_codes
[ARM_INVERSE_CONDITION_CODE
11105 (get_arm_condition_code (x
))],
/* Cirrus registers can be accessed in a variety of ways:
     single floating point (f)
     double floating point (d)
     32bit integer         (fx)
     64bit integer         (dx).  */
11114 case 'W': /* Cirrus register in F mode. */
11115 case 'X': /* Cirrus register in D mode. */
11116 case 'Y': /* Cirrus register in FX mode. */
11117 case 'Z': /* Cirrus register in DX mode. */
11118 gcc_assert (GET_CODE (x
) == REG
11119 && REGNO_REG_CLASS (REGNO (x
)) == CIRRUS_REGS
);
11121 fprintf (stream
, "mv%s%s",
11123 : code
== 'X' ? "d"
11124 : code
== 'Y' ? "fx" : "dx", reg_names
[REGNO (x
)] + 2);
11128 /* Print cirrus register in the mode specified by the register's mode. */
11131 int mode
= GET_MODE (x
);
11133 if (GET_CODE (x
) != REG
|| REGNO_REG_CLASS (REGNO (x
)) != CIRRUS_REGS
)
11135 output_operand_lossage ("invalid operand for code '%c'", code
);
11139 fprintf (stream
, "mv%s%s",
11140 mode
== DFmode
? "d"
11141 : mode
== SImode
? "fx"
11142 : mode
== DImode
? "dx"
11143 : "f", reg_names
[REGNO (x
)] + 2);
11149 if (GET_CODE (x
) != REG
11150 || REGNO (x
) < FIRST_IWMMXT_GR_REGNUM
11151 || REGNO (x
) > LAST_IWMMXT_GR_REGNUM
)
11152 /* Bad value for wCG register number. */
11154 output_operand_lossage ("invalid operand for code '%c'", code
);
11159 fprintf (stream
, "%d", REGNO (x
) - FIRST_IWMMXT_GR_REGNUM
);
11162 /* Print an iWMMXt control register name. */
11164 if (GET_CODE (x
) != CONST_INT
11166 || INTVAL (x
) >= 16)
11167 /* Bad value for wC register number. */
11169 output_operand_lossage ("invalid operand for code '%c'", code
);
11175 static const char * wc_reg_names
[16] =
11177 "wCID", "wCon", "wCSSF", "wCASF",
11178 "wC4", "wC5", "wC6", "wC7",
11179 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11180 "wC12", "wC13", "wC14", "wC15"
11183 fprintf (stream
, wc_reg_names
[INTVAL (x
)]);
11187 /* Print a VFP double precision register name. */
11190 int mode
= GET_MODE (x
);
11193 if (mode
!= DImode
&& mode
!= DFmode
)
11195 output_operand_lossage ("invalid operand for code '%c'", code
);
11199 if (GET_CODE (x
) != REG
11200 || !IS_VFP_REGNUM (REGNO (x
)))
11202 output_operand_lossage ("invalid operand for code '%c'", code
);
11206 num
= REGNO(x
) - FIRST_VFP_REGNUM
;
11209 output_operand_lossage ("invalid operand for code '%c'", code
);
11213 fprintf (stream
, "d%d", num
>> 1);
11220 output_operand_lossage ("missing operand");
11224 switch (GET_CODE (x
))
11227 asm_fprintf (stream
, "%r", REGNO (x
));
11231 output_memory_reference_mode
= GET_MODE (x
);
11232 output_address (XEXP (x
, 0));
11236 fprintf (stream
, "#%s", fp_immediate_constant (x
));
11240 gcc_assert (GET_CODE (x
) != NEG
);
11241 fputc ('#', stream
);
11242 output_addr_const (stream
, x
);
#ifndef AOF_ASSEMBLER
/* Target hook for assembling integer objects.  The ARM version needs to
   handle word-sized values specially.  */
arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
  if (size == UNITS_PER_WORD && aligned_p)
      fputs ("\t.word\t", asm_out_file);
      output_addr_const (asm_out_file, x);

      /* Mark symbols as position independent.  We only do this in the
	 .text segment, not in the .data segment.  */
      if (NEED_GOT_RELOC && flag_pic && making_const_table &&
	  (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
	  if (GET_CODE (x) == SYMBOL_REF
	      && (CONSTANT_POOL_ADDRESS_P (x)
		  || SYMBOL_REF_LOCAL_P (x)))
	    fputs ("(GOTOFF)", asm_out_file);
	  else if (GET_CODE (x) == LABEL_REF)
	    fputs ("(GOTOFF)", asm_out_file);

	    fputs ("(GOT)", asm_out_file);

      fputc ('\n', asm_out_file);

  if (arm_vector_mode_supported_p (GET_MODE (x)))
      gcc_assert (GET_CODE (x) == CONST_VECTOR);

      units = CONST_VECTOR_NUNITS (x);

      switch (GET_MODE (x))
	case V2SImode: size = 4; break;
	case V4HImode: size = 2; break;
	case V8QImode: size = 1; break;
	  gcc_unreachable ();

      for (i = 0; i < units; i++)
	  elt = CONST_VECTOR_ELT (x, i);
	    (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);

  return default_assemble_integer (x, size, aligned_p);
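
/* Illustrative note (added comment, not part of the original source): for a
   word-sized, aligned integer the hook above emits a ".word" directive and,
   when building PIC constant tables, decorates local symbols and labels with
   (GOTOFF) and other symbols with (GOT).  Assuming a local symbol "foo" and
   an external symbol "bar", the emitted entries would look roughly like

	.word	foo(GOTOFF)
	.word	bar(GOT)

   Vector constants are split into their elements, and anything else falls
   back to default_assemble_integer.  */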
/* Add a function to the list of static constructors.  */
arm_elf_asm_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
  if (!TARGET_AAPCS_BASED)
      default_named_section_asm_out_constructor (symbol, priority);

  /* Put these in the .init_array section, using a special relocation.  */
  assemble_align (POINTER_SIZE);
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, symbol);
  fputs ("(target1)\n", asm_out_file);
/* A finite state machine takes care of noticing whether or not instructions
   can be conditionally executed, and thus decrease execution time and code
   size by deleting branch instructions.  The fsm is controlled by
   final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE.  */

/* The states of the fsm controlling condition codes are:
   0: normal, do nothing special
   1: make ASM_OUTPUT_OPCODE not output this instruction
   2: make ASM_OUTPUT_OPCODE not output this instruction
   3: make instructions conditional
   4: make instructions conditional

   State transitions (state->state by whom under condition):
   0 -> 1 final_prescan_insn if the `target' is a label
   0 -> 2 final_prescan_insn if the `target' is an unconditional branch
   1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
   2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
   3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
	  (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
   4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
	  (the target insn is arm_target_insn).

   If the jump clobbers the conditions then we use states 2 and 4.

   A similar thing can be done with conditional return insns.

   XXX In case the `target' is an unconditional branch, this conditionalising
   of the instructions always reduces code size, but not always execution
   time.  But then, I want to reduce the code size to somewhere near what
   /bin/cc produces.  */
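
/* Illustrative sketch (added comment, not in the original): the net effect
   of this state machine is to turn a short forward branch such as

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   into a single conditionally executed instruction

	cmp	r0, #0
	addne	r1, r1, #1

   provided no more than max_insns_skipped instructions are skipped and none
   of the skipped instructions set or use the condition codes.  */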
/* Returns the index of the ARM condition code string in
   `arm_condition_codes'.  COMPARISON should be an rtx like
   `(eq (...) (...))'.  */
static enum arm_cond_code
get_arm_condition_code (rtx comparison)
  enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
  enum rtx_code comp_code = GET_CODE (comparison);

  if (GET_MODE_CLASS (mode) != MODE_CC)
    mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
			   XEXP (comparison, 1));
    case CC_DNEmode: code = ARM_NE; goto dominance;
    case CC_DEQmode: code = ARM_EQ; goto dominance;
    case CC_DGEmode: code = ARM_GE; goto dominance;
    case CC_DGTmode: code = ARM_GT; goto dominance;
    case CC_DLEmode: code = ARM_LE; goto dominance;
    case CC_DLTmode: code = ARM_LT; goto dominance;
    case CC_DGEUmode: code = ARM_CS; goto dominance;
    case CC_DGTUmode: code = ARM_HI; goto dominance;
    case CC_DLEUmode: code = ARM_LS; goto dominance;
    case CC_DLTUmode: code = ARM_CC;

      gcc_assert (comp_code == EQ || comp_code == NE);

      if (comp_code == EQ)
	return ARM_INVERSE_CONDITION_CODE (code);

	case NE: return ARM_NE;
	case EQ: return ARM_EQ;
	case GE: return ARM_PL;
	case LT: return ARM_MI;
	default: gcc_unreachable ();

	case NE: return ARM_NE;
	case EQ: return ARM_EQ;
	default: gcc_unreachable ();

	case NE: return ARM_MI;
	case EQ: return ARM_PL;
	default: gcc_unreachable ();

      /* These encodings assume that AC=1 in the FPA system control
	 byte.  This allows us to handle all cases except UNEQ and
	 LTGT.  */
	case GE: return ARM_GE;
	case GT: return ARM_GT;
	case LE: return ARM_LS;
	case LT: return ARM_MI;
	case NE: return ARM_NE;
	case EQ: return ARM_EQ;
	case ORDERED: return ARM_VC;
	case UNORDERED: return ARM_VS;
	case UNLT: return ARM_LT;
	case UNLE: return ARM_LE;
	case UNGT: return ARM_HI;
	case UNGE: return ARM_PL;
	/* UNEQ and LTGT do not have a representation.  */
	case UNEQ: /* Fall through.  */
	case LTGT: /* Fall through.  */
	default: gcc_unreachable ();

	case NE: return ARM_NE;
	case EQ: return ARM_EQ;
	case GE: return ARM_LE;
	case GT: return ARM_LT;
	case LE: return ARM_GE;
	case LT: return ARM_GT;
	case GEU: return ARM_LS;
	case GTU: return ARM_CC;
	case LEU: return ARM_CS;
	case LTU: return ARM_HI;
	default: gcc_unreachable ();

	case LTU: return ARM_CS;
	case GEU: return ARM_CC;
	default: gcc_unreachable ();

	case NE: return ARM_NE;
	case EQ: return ARM_EQ;
	case GE: return ARM_GE;
	case GT: return ARM_GT;
	case LE: return ARM_LE;
	case LT: return ARM_LT;
	case GEU: return ARM_CS;
	case GTU: return ARM_HI;
	case LEU: return ARM_LS;
	case LTU: return ARM_CC;
	default: gcc_unreachable ();

    default: gcc_unreachable ();
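
/* Illustrative example (added comment, not in the original): for a
   comparison rtx such as (gt (reg:CC CC_REGNUM) (const_int 0)) the function
   above returns ARM_GT, which selects the "gt" suffix from
   arm_condition_codes when the conditional instruction is printed; the
   swapped-operand and dominance CC modes remap or invert the result as
   shown in the switch above.  */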
arm_final_prescan_insn (rtx insn)
  /* BODY will hold the body of INSN.  */
  rtx body = PATTERN (insn);

  /* This will be 1 if trying to repeat the trick, and things need to be
     reversed if it appears to fail.  */

  /* If JUMP_CLOBBERS is nonzero then the condition codes are clobbered if
     the branch is taken, even if the rtl suggests otherwise.  It also
     means that we have to grub around within the jump expression to find
     out what the conditions are when the jump isn't taken.  */
  int jump_clobbers = 0;

  /* If we start with a return insn, we only succeed if we find another one.  */
  int seeking_return = 0;

  /* START_INSN will hold the insn from where we start looking.  This is the
     first insn after the following code_label if REVERSE is true.  */
  rtx start_insn = insn;

  /* If in state 4, check if the target branch is reached, in order to
     change back to state 0.  */
  if (arm_ccfsm_state == 4)
      if (insn == arm_target_insn)
	  arm_target_insn = NULL;
	  arm_ccfsm_state = 0;

  /* If in state 3, it is possible to repeat the trick, if this insn is an
     unconditional branch to a label, and immediately following this branch
     is the previous target label which is only used once, and the label this
     branch jumps to is not too far off.  */
  if (arm_ccfsm_state == 3)
      if (simplejump_p (insn))
	  start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == BARRIER)
	      /* XXX Isn't this always a barrier?  */
	      start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == CODE_LABEL
	      && CODE_LABEL_NUMBER (start_insn) == arm_target_label
	      && LABEL_NUSES (start_insn) == 1)

      else if (GET_CODE (body) == RETURN)
	  start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == BARRIER)
	    start_insn = next_nonnote_insn (start_insn);
	  if (GET_CODE (start_insn) == CODE_LABEL
	      && CODE_LABEL_NUMBER (start_insn) == arm_target_label
	      && LABEL_NUSES (start_insn) == 1)
	      seeking_return = 1;

  gcc_assert (!arm_ccfsm_state || reverse);
  if (GET_CODE (insn) != JUMP_INSN)

  /* This jump might be paralleled with a clobber of the condition codes;
     the jump should always come first.  */
  if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
    body = XVECEXP (body, 0, 0);

      || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
	  && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
      int fail = FALSE, succeed = FALSE;
      /* Flag which part of the IF_THEN_ELSE is the LABEL_REF.  */
      int then_not_else = TRUE;
      rtx this_insn = start_insn, label = 0;

      /* If the jump cannot be done with one instruction, we cannot
	 conditionally execute the instruction in the inverse case.  */
      if (get_attr_conds (insn) == CONDS_JUMP_CLOB)

      /* Register the insn jumped to.  */
      if (!seeking_return)
	label = XEXP (SET_SRC (body), 0);

      else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
	label = XEXP (XEXP (SET_SRC (body), 1), 0);
      else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
	  label = XEXP (XEXP (SET_SRC (body), 2), 0);
	  then_not_else = FALSE;

      else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
	seeking_return = 1;
      else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
	  seeking_return = 1;
	  then_not_else = FALSE;

	gcc_unreachable ();
      /* See how many insns this branch skips, and what kind of insns.  If all
	 insns are okay, and the label or unconditional branch to the same
	 label is not too far away, succeed.  */
      for (insns_skipped = 0;
	   !fail && !succeed && insns_skipped++ < max_insns_skipped;)
	  this_insn = next_nonnote_insn (this_insn);

	  switch (GET_CODE (this_insn))
	      /* Succeed if it is the target label, otherwise fail since
		 control falls in from somewhere else.  */
	      if (this_insn == label)
		      arm_ccfsm_state = 2;
		      this_insn = next_nonnote_insn (this_insn);
		    arm_ccfsm_state = 1;

	      /* Succeed if the following insn is the target label.

		 If return insns are used then the last insn in a function
		 will be a barrier.  */
	      this_insn = next_nonnote_insn (this_insn);
	      if (this_insn && this_insn == label)
		      arm_ccfsm_state = 2;
		      this_insn = next_nonnote_insn (this_insn);
		    arm_ccfsm_state = 1;

	      /* The AAPCS says that conditional calls should not be
		 used since they make interworking inefficient (the
		 linker can't transform BL<cond> into BLX).  That's
		 only a problem if the machine has BLX.  */

	      /* Succeed if the following insn is the target label, or
		 if the following two insns are a barrier and the
		 target label.  */
	      this_insn = next_nonnote_insn (this_insn);
	      if (this_insn && GET_CODE (this_insn) == BARRIER)
		this_insn = next_nonnote_insn (this_insn);

	      if (this_insn && this_insn == label
		  && insns_skipped < max_insns_skipped)
		      arm_ccfsm_state = 2;
		      this_insn = next_nonnote_insn (this_insn);
		    arm_ccfsm_state = 1;
	      /* If this is an unconditional branch to the same label, succeed.
		 If it is to another label, do nothing.  If it is conditional,
		 fail.  */
	      /* XXX Probably, the tests for SET and the PC are
		 unnecessary.  */
	      scanbody = PATTERN (this_insn);
	      if (GET_CODE (scanbody) == SET
		  && GET_CODE (SET_DEST (scanbody)) == PC)
		  if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
		      && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
		      arm_ccfsm_state = 2;
		  else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)

	      /* Fail if a conditional return is undesirable (e.g. on a
		 StrongARM), but still allow this if optimizing for size.  */
	      else if (GET_CODE (scanbody) == RETURN
		       && !use_return_insn (TRUE, NULL)

	      else if (GET_CODE (scanbody) == RETURN
		  arm_ccfsm_state = 2;

	      else if (GET_CODE (scanbody) == PARALLEL)
		  switch (get_attr_conds (this_insn))

	      fail = TRUE;	/* Unrecognized jump (e.g. epilogue).  */
	      /* Instructions using or affecting the condition codes make it
		 fail.  */
	      scanbody = PATTERN (this_insn);
	      if (!(GET_CODE (scanbody) == SET
		    || GET_CODE (scanbody) == PARALLEL)
		  || get_attr_conds (this_insn) != CONDS_NOCOND)

	      /* A conditional Cirrus instruction must be followed by
		 a non-Cirrus instruction.  However, since we
		 conditionalize instructions in this function and by
		 the time we get here we can't add instructions
		 (nops), because shorten_branches() has already been
		 called, we will disable conditionalizing Cirrus
		 instructions to be safe.  */
	      if (GET_CODE (scanbody) != USE
		  && GET_CODE (scanbody) != CLOBBER
		  && get_attr_cirrus (this_insn) != CIRRUS_NOT)
      if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
	arm_target_label = CODE_LABEL_NUMBER (label);

	  gcc_assert (seeking_return || arm_ccfsm_state == 2);

	  while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
	      this_insn = next_nonnote_insn (this_insn);
	      gcc_assert (!this_insn
			  || (GET_CODE (this_insn) != BARRIER
			      && GET_CODE (this_insn) != CODE_LABEL));

	      /* Oh, dear!  We ran off the end.  Give up.  */
	      recog (PATTERN (insn), insn, NULL);
	      arm_ccfsm_state = 0;
	      arm_target_insn = NULL;

	  arm_target_insn = this_insn;

	  gcc_assert (!reverse);
	    get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),

	  if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
	    arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
	  if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
	    arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);

	  /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
	     what it was.  */
	  arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),

	  if (reverse || then_not_else)
	    arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);

      /* Restore recog_data (getting the attributes of other insns can
	 destroy this array, but final.c assumes that it remains intact
	 across this call; since the insn has been recognized already we
	 call recog direct).  */
      recog (PATTERN (insn), insn, NULL);
/* Returns true if REGNO is a valid register
   for holding a quantity of type MODE.  */
arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
  if (GET_MODE_CLASS (mode) == MODE_CC)
    return (regno == CC_REGNUM
	    || (TARGET_HARD_FLOAT && TARGET_VFP
		&& regno == VFPCC_REGNUM));

    /* For the Thumb we only allow values bigger than SImode in
       registers 0 - 6, so that there is always a second low
       register available to hold the upper part of the value.
       We probably ought to ensure that the register is the
       start of an even numbered register pair.  */
    return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);

  if (TARGET_HARD_FLOAT && TARGET_MAVERICK
      && IS_CIRRUS_REGNUM (regno))
    /* We have outlawed SI values in Cirrus registers because they
       reside in the lower 32 bits, but SF values reside in the
       upper 32 bits.  This causes gcc all sorts of grief.  We can't
       even split the registers into pairs because Cirrus SI values
       get sign extended to 64bits-- aldyh.  */
    return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);

  if (TARGET_HARD_FLOAT && TARGET_VFP
      && IS_VFP_REGNUM (regno))
      if (mode == SFmode || mode == SImode)

      /* DFmode values are only valid in even register pairs.  */
      if (mode == DFmode)
	return ((regno - FIRST_VFP_REGNUM) & 1) == 0;

  if (TARGET_REALLY_IWMMXT)
      if (IS_IWMMXT_GR_REGNUM (regno))
	return mode == SImode;

      if (IS_IWMMXT_REGNUM (regno))
	return VALID_IWMMXT_REG_MODE (mode);

  /* We allow any value to be stored in the general registers.
     Restrict doubleword quantities to even register pairs so that we can
     use ldrd.  */
  if (regno <= LAST_ARM_REGNUM)
    return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);

  if (regno == FRAME_POINTER_REGNUM
      || regno == ARG_POINTER_REGNUM)
    /* We only allow integers in the fake hard registers.  */
    return GET_MODE_CLASS (mode) == MODE_INT;

  /* The only registers left are the FPA registers
     which we only allow to hold FP values.  */
  return (TARGET_HARD_FLOAT && TARGET_FPA
	  && GET_MODE_CLASS (mode) == MODE_FLOAT
	  && regno >= FIRST_FPA_REGNUM
	  && regno <= LAST_FPA_REGNUM);
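
/* Worked example (added comment): with VFP enabled a DFmode value may start
   at an even VFP register number (regno - FIRST_VFP_REGNUM even) but not at
   an odd one, which is what the DFmode test above enforces.  Likewise, with
   TARGET_LDRD a doubleword value in the core registers must start on an
   even-numbered register so that ldrd/strd remain usable.  */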
arm_regno_class (int regno)
      if (regno == STACK_POINTER_REGNUM)
      if (regno == CC_REGNUM)

  if (   regno <= LAST_ARM_REGNUM
      || regno == FRAME_POINTER_REGNUM
      || regno == ARG_POINTER_REGNUM)
    return GENERAL_REGS;

  if (regno == CC_REGNUM || regno == VFPCC_REGNUM)

  if (IS_CIRRUS_REGNUM (regno))
    return CIRRUS_REGS;

  if (IS_VFP_REGNUM (regno))

  if (IS_IWMMXT_REGNUM (regno))
    return IWMMXT_REGS;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return IWMMXT_GR_REGS;
/* Handle a special case when computing the offset
   of an argument from the frame pointer.  */
arm_debugger_arg_offset (int value, rtx addr)
  /* We are only interested if dbxout_parms() failed to compute the offset.  */

  /* We can only cope with the case where the address is held in a register.  */
  if (GET_CODE (addr) != REG)

  /* If we are using the frame pointer to point at the argument, then
     an offset of 0 is correct.  */
  if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)

  /* If we are using the stack pointer to point at the
     argument, then an offset of 0 is correct.  */
  if ((TARGET_THUMB || !frame_pointer_needed)
      && REGNO (addr) == SP_REGNUM)

  /* Oh dear.  The argument is pointed to by a register rather
     than being held in a register, or being stored at a known
     offset from the frame pointer.  Since GDB only understands
     those two kinds of argument we must translate the address
     held in the register into an offset from the frame pointer.
     We do this by searching through the insns for the function
     looking to see where this register gets its value.  If the
     register is initialized from the frame pointer plus an offset
     then we are in luck and we can continue, otherwise we give up.

     This code is exercised by producing debugging information
     for a function with arguments like this:

	   double func (double a, double b, int c, double d) {return d;}

     Without this code the stab for parameter 'd' will be set to
     an offset of 0 from the frame pointer, rather than 8.  */

  /* The if() statement says:

     If the insn is a normal instruction
     and if the insn is setting the value in a register
     and if the register being set is the register holding the address of the argument
     and if the address is computed by an addition
     that involves adding to a register
     which is the frame pointer

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
      if (   GET_CODE (insn) == INSN
	  && GET_CODE (PATTERN (insn)) == SET
	  && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
	  && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
	  && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
	  && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
	  && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
	  value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));

      warning (0, "unable to compute real location of stacked parameter");
      value = 8; /* XXX magic hack */
#define def_mbuiltin(MASK, NAME, TYPE, CODE)				\
    if ((MASK) & insn_flags)						\
      lang_hooks.builtin_function ((NAME), (TYPE), (CODE),		\
				   BUILT_IN_MD, NULL, NULL_TREE);	\

struct builtin_description
  const unsigned int       mask;
  const enum insn_code     icode;
  const char * const       name;
  const enum arm_builtins  code;
  const enum rtx_code      comparison;
  const unsigned int       flag;
12039 static const struct builtin_description bdesc_2arg
[] =
12041 #define IWMMXT_BUILTIN(code, string, builtin) \
12042 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12043 ARM_BUILTIN_##builtin, 0, 0 },
12045 IWMMXT_BUILTIN (addv8qi3
, "waddb", WADDB
)
12046 IWMMXT_BUILTIN (addv4hi3
, "waddh", WADDH
)
12047 IWMMXT_BUILTIN (addv2si3
, "waddw", WADDW
)
12048 IWMMXT_BUILTIN (subv8qi3
, "wsubb", WSUBB
)
12049 IWMMXT_BUILTIN (subv4hi3
, "wsubh", WSUBH
)
12050 IWMMXT_BUILTIN (subv2si3
, "wsubw", WSUBW
)
12051 IWMMXT_BUILTIN (ssaddv8qi3
, "waddbss", WADDSSB
)
12052 IWMMXT_BUILTIN (ssaddv4hi3
, "waddhss", WADDSSH
)
12053 IWMMXT_BUILTIN (ssaddv2si3
, "waddwss", WADDSSW
)
12054 IWMMXT_BUILTIN (sssubv8qi3
, "wsubbss", WSUBSSB
)
12055 IWMMXT_BUILTIN (sssubv4hi3
, "wsubhss", WSUBSSH
)
12056 IWMMXT_BUILTIN (sssubv2si3
, "wsubwss", WSUBSSW
)
12057 IWMMXT_BUILTIN (usaddv8qi3
, "waddbus", WADDUSB
)
12058 IWMMXT_BUILTIN (usaddv4hi3
, "waddhus", WADDUSH
)
12059 IWMMXT_BUILTIN (usaddv2si3
, "waddwus", WADDUSW
)
12060 IWMMXT_BUILTIN (ussubv8qi3
, "wsubbus", WSUBUSB
)
12061 IWMMXT_BUILTIN (ussubv4hi3
, "wsubhus", WSUBUSH
)
12062 IWMMXT_BUILTIN (ussubv2si3
, "wsubwus", WSUBUSW
)
12063 IWMMXT_BUILTIN (mulv4hi3
, "wmulul", WMULUL
)
12064 IWMMXT_BUILTIN (smulv4hi3_highpart
, "wmulsm", WMULSM
)
12065 IWMMXT_BUILTIN (umulv4hi3_highpart
, "wmulum", WMULUM
)
12066 IWMMXT_BUILTIN (eqv8qi3
, "wcmpeqb", WCMPEQB
)
12067 IWMMXT_BUILTIN (eqv4hi3
, "wcmpeqh", WCMPEQH
)
12068 IWMMXT_BUILTIN (eqv2si3
, "wcmpeqw", WCMPEQW
)
12069 IWMMXT_BUILTIN (gtuv8qi3
, "wcmpgtub", WCMPGTUB
)
12070 IWMMXT_BUILTIN (gtuv4hi3
, "wcmpgtuh", WCMPGTUH
)
12071 IWMMXT_BUILTIN (gtuv2si3
, "wcmpgtuw", WCMPGTUW
)
12072 IWMMXT_BUILTIN (gtv8qi3
, "wcmpgtsb", WCMPGTSB
)
12073 IWMMXT_BUILTIN (gtv4hi3
, "wcmpgtsh", WCMPGTSH
)
12074 IWMMXT_BUILTIN (gtv2si3
, "wcmpgtsw", WCMPGTSW
)
12075 IWMMXT_BUILTIN (umaxv8qi3
, "wmaxub", WMAXUB
)
12076 IWMMXT_BUILTIN (smaxv8qi3
, "wmaxsb", WMAXSB
)
12077 IWMMXT_BUILTIN (umaxv4hi3
, "wmaxuh", WMAXUH
)
12078 IWMMXT_BUILTIN (smaxv4hi3
, "wmaxsh", WMAXSH
)
12079 IWMMXT_BUILTIN (umaxv2si3
, "wmaxuw", WMAXUW
)
12080 IWMMXT_BUILTIN (smaxv2si3
, "wmaxsw", WMAXSW
)
12081 IWMMXT_BUILTIN (uminv8qi3
, "wminub", WMINUB
)
12082 IWMMXT_BUILTIN (sminv8qi3
, "wminsb", WMINSB
)
12083 IWMMXT_BUILTIN (uminv4hi3
, "wminuh", WMINUH
)
12084 IWMMXT_BUILTIN (sminv4hi3
, "wminsh", WMINSH
)
12085 IWMMXT_BUILTIN (uminv2si3
, "wminuw", WMINUW
)
12086 IWMMXT_BUILTIN (sminv2si3
, "wminsw", WMINSW
)
12087 IWMMXT_BUILTIN (iwmmxt_anddi3
, "wand", WAND
)
12088 IWMMXT_BUILTIN (iwmmxt_nanddi3
, "wandn", WANDN
)
12089 IWMMXT_BUILTIN (iwmmxt_iordi3
, "wor", WOR
)
12090 IWMMXT_BUILTIN (iwmmxt_xordi3
, "wxor", WXOR
)
12091 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3
, "wavg2b", WAVG2B
)
12092 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3
, "wavg2h", WAVG2H
)
12093 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3
, "wavg2br", WAVG2BR
)
12094 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3
, "wavg2hr", WAVG2HR
)
12095 IWMMXT_BUILTIN (iwmmxt_wunpckilb
, "wunpckilb", WUNPCKILB
)
12096 IWMMXT_BUILTIN (iwmmxt_wunpckilh
, "wunpckilh", WUNPCKILH
)
12097 IWMMXT_BUILTIN (iwmmxt_wunpckilw
, "wunpckilw", WUNPCKILW
)
12098 IWMMXT_BUILTIN (iwmmxt_wunpckihb
, "wunpckihb", WUNPCKIHB
)
12099 IWMMXT_BUILTIN (iwmmxt_wunpckihh
, "wunpckihh", WUNPCKIHH
)
12100 IWMMXT_BUILTIN (iwmmxt_wunpckihw
, "wunpckihw", WUNPCKIHW
)
12101 IWMMXT_BUILTIN (iwmmxt_wmadds
, "wmadds", WMADDS
)
12102 IWMMXT_BUILTIN (iwmmxt_wmaddu
, "wmaddu", WMADDU
)
12104 #define IWMMXT_BUILTIN2(code, builtin) \
12105 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
12107 IWMMXT_BUILTIN2 (iwmmxt_wpackhss
, WPACKHSS
)
12108 IWMMXT_BUILTIN2 (iwmmxt_wpackwss
, WPACKWSS
)
12109 IWMMXT_BUILTIN2 (iwmmxt_wpackdss
, WPACKDSS
)
12110 IWMMXT_BUILTIN2 (iwmmxt_wpackhus
, WPACKHUS
)
12111 IWMMXT_BUILTIN2 (iwmmxt_wpackwus
, WPACKWUS
)
12112 IWMMXT_BUILTIN2 (iwmmxt_wpackdus
, WPACKDUS
)
12113 IWMMXT_BUILTIN2 (ashlv4hi3_di
, WSLLH
)
12114 IWMMXT_BUILTIN2 (ashlv4hi3
, WSLLHI
)
12115 IWMMXT_BUILTIN2 (ashlv2si3_di
, WSLLW
)
12116 IWMMXT_BUILTIN2 (ashlv2si3
, WSLLWI
)
12117 IWMMXT_BUILTIN2 (ashldi3_di
, WSLLD
)
12118 IWMMXT_BUILTIN2 (ashldi3_iwmmxt
, WSLLDI
)
12119 IWMMXT_BUILTIN2 (lshrv4hi3_di
, WSRLH
)
12120 IWMMXT_BUILTIN2 (lshrv4hi3
, WSRLHI
)
12121 IWMMXT_BUILTIN2 (lshrv2si3_di
, WSRLW
)
12122 IWMMXT_BUILTIN2 (lshrv2si3
, WSRLWI
)
12123 IWMMXT_BUILTIN2 (lshrdi3_di
, WSRLD
)
12124 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt
, WSRLDI
)
12125 IWMMXT_BUILTIN2 (ashrv4hi3_di
, WSRAH
)
12126 IWMMXT_BUILTIN2 (ashrv4hi3
, WSRAHI
)
12127 IWMMXT_BUILTIN2 (ashrv2si3_di
, WSRAW
)
12128 IWMMXT_BUILTIN2 (ashrv2si3
, WSRAWI
)
12129 IWMMXT_BUILTIN2 (ashrdi3_di
, WSRAD
)
12130 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt
, WSRADI
)
12131 IWMMXT_BUILTIN2 (rorv4hi3_di
, WRORH
)
12132 IWMMXT_BUILTIN2 (rorv4hi3
, WRORHI
)
12133 IWMMXT_BUILTIN2 (rorv2si3_di
, WRORW
)
12134 IWMMXT_BUILTIN2 (rorv2si3
, WRORWI
)
12135 IWMMXT_BUILTIN2 (rordi3_di
, WRORD
)
12136 IWMMXT_BUILTIN2 (rordi3
, WRORDI
)
12137 IWMMXT_BUILTIN2 (iwmmxt_wmacuz
, WMACUZ
)
12138 IWMMXT_BUILTIN2 (iwmmxt_wmacsz
, WMACSZ
)
12141 static const struct builtin_description bdesc_1arg
[] =
12143 IWMMXT_BUILTIN (iwmmxt_tmovmskb
, "tmovmskb", TMOVMSKB
)
12144 IWMMXT_BUILTIN (iwmmxt_tmovmskh
, "tmovmskh", TMOVMSKH
)
12145 IWMMXT_BUILTIN (iwmmxt_tmovmskw
, "tmovmskw", TMOVMSKW
)
12146 IWMMXT_BUILTIN (iwmmxt_waccb
, "waccb", WACCB
)
12147 IWMMXT_BUILTIN (iwmmxt_wacch
, "wacch", WACCH
)
12148 IWMMXT_BUILTIN (iwmmxt_waccw
, "waccw", WACCW
)
12149 IWMMXT_BUILTIN (iwmmxt_wunpckehub
, "wunpckehub", WUNPCKEHUB
)
12150 IWMMXT_BUILTIN (iwmmxt_wunpckehuh
, "wunpckehuh", WUNPCKEHUH
)
12151 IWMMXT_BUILTIN (iwmmxt_wunpckehuw
, "wunpckehuw", WUNPCKEHUW
)
12152 IWMMXT_BUILTIN (iwmmxt_wunpckehsb
, "wunpckehsb", WUNPCKEHSB
)
12153 IWMMXT_BUILTIN (iwmmxt_wunpckehsh
, "wunpckehsh", WUNPCKEHSH
)
12154 IWMMXT_BUILTIN (iwmmxt_wunpckehsw
, "wunpckehsw", WUNPCKEHSW
)
12155 IWMMXT_BUILTIN (iwmmxt_wunpckelub
, "wunpckelub", WUNPCKELUB
)
12156 IWMMXT_BUILTIN (iwmmxt_wunpckeluh
, "wunpckeluh", WUNPCKELUH
)
12157 IWMMXT_BUILTIN (iwmmxt_wunpckeluw
, "wunpckeluw", WUNPCKELUW
)
12158 IWMMXT_BUILTIN (iwmmxt_wunpckelsb
, "wunpckelsb", WUNPCKELSB
)
12159 IWMMXT_BUILTIN (iwmmxt_wunpckelsh
, "wunpckelsh", WUNPCKELSH
)
12160 IWMMXT_BUILTIN (iwmmxt_wunpckelsw
, "wunpckelsw", WUNPCKELSW
)
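
/* Illustrative usage (added comment, not in the original): the tables above
   only pair insn patterns with builtin codes; the user-visible entry points
   are the __builtin_arm_* functions registered by arm_init_iwmmxt_builtins
   below.  A hypothetical use from C, compiled with iWMMXt enabled, would
   look roughly like

	long long a, b, c;
	c = __builtin_arm_wand (a, b);

   while a one-operand builtin such as __builtin_arm_tmovmskb takes an
   8 x QImode vector and returns an int.  Each call is routed through
   arm_expand_binop_builtin or arm_expand_unop_builtin further down in
   this file.  */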
12163 /* Set up all the iWMMXt builtins. This is
12164 not called if TARGET_IWMMXT is zero. */
12167 arm_init_iwmmxt_builtins (void)
12169 const struct builtin_description
* d
;
12171 tree endlink
= void_list_node
;
12173 tree V2SI_type_node
= build_vector_type_for_mode (intSI_type_node
, V2SImode
);
12174 tree V4HI_type_node
= build_vector_type_for_mode (intHI_type_node
, V4HImode
);
12175 tree V8QI_type_node
= build_vector_type_for_mode (intQI_type_node
, V8QImode
);
12178 = build_function_type (integer_type_node
,
12179 tree_cons (NULL_TREE
, integer_type_node
, endlink
));
12180 tree v8qi_ftype_v8qi_v8qi_int
12181 = build_function_type (V8QI_type_node
,
12182 tree_cons (NULL_TREE
, V8QI_type_node
,
12183 tree_cons (NULL_TREE
, V8QI_type_node
,
12184 tree_cons (NULL_TREE
,
12187 tree v4hi_ftype_v4hi_int
12188 = build_function_type (V4HI_type_node
,
12189 tree_cons (NULL_TREE
, V4HI_type_node
,
12190 tree_cons (NULL_TREE
, integer_type_node
,
12192 tree v2si_ftype_v2si_int
12193 = build_function_type (V2SI_type_node
,
12194 tree_cons (NULL_TREE
, V2SI_type_node
,
12195 tree_cons (NULL_TREE
, integer_type_node
,
12197 tree v2si_ftype_di_di
12198 = build_function_type (V2SI_type_node
,
12199 tree_cons (NULL_TREE
, long_long_integer_type_node
,
12200 tree_cons (NULL_TREE
, long_long_integer_type_node
,
12202 tree di_ftype_di_int
12203 = build_function_type (long_long_integer_type_node
,
12204 tree_cons (NULL_TREE
, long_long_integer_type_node
,
12205 tree_cons (NULL_TREE
, integer_type_node
,
12207 tree di_ftype_di_int_int
12208 = build_function_type (long_long_integer_type_node
,
12209 tree_cons (NULL_TREE
, long_long_integer_type_node
,
12210 tree_cons (NULL_TREE
, integer_type_node
,
12211 tree_cons (NULL_TREE
,
12214 tree int_ftype_v8qi
12215 = build_function_type (integer_type_node
,
12216 tree_cons (NULL_TREE
, V8QI_type_node
,
12218 tree int_ftype_v4hi
12219 = build_function_type (integer_type_node
,
12220 tree_cons (NULL_TREE
, V4HI_type_node
,
12222 tree int_ftype_v2si
12223 = build_function_type (integer_type_node
,
12224 tree_cons (NULL_TREE
, V2SI_type_node
,
12226 tree int_ftype_v8qi_int
12227 = build_function_type (integer_type_node
,
12228 tree_cons (NULL_TREE
, V8QI_type_node
,
12229 tree_cons (NULL_TREE
, integer_type_node
,
12231 tree int_ftype_v4hi_int
12232 = build_function_type (integer_type_node
,
12233 tree_cons (NULL_TREE
, V4HI_type_node
,
12234 tree_cons (NULL_TREE
, integer_type_node
,
12236 tree int_ftype_v2si_int
12237 = build_function_type (integer_type_node
,
12238 tree_cons (NULL_TREE
, V2SI_type_node
,
12239 tree_cons (NULL_TREE
, integer_type_node
,
12241 tree v8qi_ftype_v8qi_int_int
12242 = build_function_type (V8QI_type_node
,
12243 tree_cons (NULL_TREE
, V8QI_type_node
,
12244 tree_cons (NULL_TREE
, integer_type_node
,
12245 tree_cons (NULL_TREE
,
12248 tree v4hi_ftype_v4hi_int_int
12249 = build_function_type (V4HI_type_node
,
12250 tree_cons (NULL_TREE
, V4HI_type_node
,
12251 tree_cons (NULL_TREE
, integer_type_node
,
12252 tree_cons (NULL_TREE
,
12255 tree v2si_ftype_v2si_int_int
12256 = build_function_type (V2SI_type_node
,
12257 tree_cons (NULL_TREE
, V2SI_type_node
,
12258 tree_cons (NULL_TREE
, integer_type_node
,
12259 tree_cons (NULL_TREE
,
12262 /* Miscellaneous. */
12263 tree v8qi_ftype_v4hi_v4hi
12264 = build_function_type (V8QI_type_node
,
12265 tree_cons (NULL_TREE
, V4HI_type_node
,
12266 tree_cons (NULL_TREE
, V4HI_type_node
,
12268 tree v4hi_ftype_v2si_v2si
12269 = build_function_type (V4HI_type_node
,
12270 tree_cons (NULL_TREE
, V2SI_type_node
,
12271 tree_cons (NULL_TREE
, V2SI_type_node
,
12273 tree v2si_ftype_v4hi_v4hi
12274 = build_function_type (V2SI_type_node
,
12275 tree_cons (NULL_TREE
, V4HI_type_node
,
12276 tree_cons (NULL_TREE
, V4HI_type_node
,
12278 tree v2si_ftype_v8qi_v8qi
12279 = build_function_type (V2SI_type_node
,
12280 tree_cons (NULL_TREE
, V8QI_type_node
,
12281 tree_cons (NULL_TREE
, V8QI_type_node
,
12283 tree v4hi_ftype_v4hi_di
12284 = build_function_type (V4HI_type_node
,
12285 tree_cons (NULL_TREE
, V4HI_type_node
,
12286 tree_cons (NULL_TREE
,
12287 long_long_integer_type_node
,
12289 tree v2si_ftype_v2si_di
12290 = build_function_type (V2SI_type_node
,
12291 tree_cons (NULL_TREE
, V2SI_type_node
,
12292 tree_cons (NULL_TREE
,
12293 long_long_integer_type_node
,
12295 tree void_ftype_int_int
12296 = build_function_type (void_type_node
,
12297 tree_cons (NULL_TREE
, integer_type_node
,
12298 tree_cons (NULL_TREE
, integer_type_node
,
12301 = build_function_type (long_long_unsigned_type_node
, endlink
);
12303 = build_function_type (long_long_integer_type_node
,
12304 tree_cons (NULL_TREE
, V8QI_type_node
,
12307 = build_function_type (long_long_integer_type_node
,
12308 tree_cons (NULL_TREE
, V4HI_type_node
,
12311 = build_function_type (long_long_integer_type_node
,
12312 tree_cons (NULL_TREE
, V2SI_type_node
,
12314 tree v2si_ftype_v4hi
12315 = build_function_type (V2SI_type_node
,
12316 tree_cons (NULL_TREE
, V4HI_type_node
,
12318 tree v4hi_ftype_v8qi
12319 = build_function_type (V4HI_type_node
,
12320 tree_cons (NULL_TREE
, V8QI_type_node
,
12323 tree di_ftype_di_v4hi_v4hi
12324 = build_function_type (long_long_unsigned_type_node
,
12325 tree_cons (NULL_TREE
,
12326 long_long_unsigned_type_node
,
12327 tree_cons (NULL_TREE
, V4HI_type_node
,
12328 tree_cons (NULL_TREE
,
12332 tree di_ftype_v4hi_v4hi
12333 = build_function_type (long_long_unsigned_type_node
,
12334 tree_cons (NULL_TREE
, V4HI_type_node
,
12335 tree_cons (NULL_TREE
, V4HI_type_node
,
12338 /* Normal vector binops. */
12339 tree v8qi_ftype_v8qi_v8qi
12340 = build_function_type (V8QI_type_node
,
12341 tree_cons (NULL_TREE
, V8QI_type_node
,
12342 tree_cons (NULL_TREE
, V8QI_type_node
,
12344 tree v4hi_ftype_v4hi_v4hi
12345 = build_function_type (V4HI_type_node
,
12346 tree_cons (NULL_TREE
, V4HI_type_node
,
12347 tree_cons (NULL_TREE
, V4HI_type_node
,
12349 tree v2si_ftype_v2si_v2si
12350 = build_function_type (V2SI_type_node
,
12351 tree_cons (NULL_TREE
, V2SI_type_node
,
12352 tree_cons (NULL_TREE
, V2SI_type_node
,
12354 tree di_ftype_di_di
12355 = build_function_type (long_long_unsigned_type_node
,
12356 tree_cons (NULL_TREE
, long_long_unsigned_type_node
,
12357 tree_cons (NULL_TREE
,
12358 long_long_unsigned_type_node
,
12361 /* Add all builtins that are more or less simple operations on two
12363 for (i
= 0, d
= bdesc_2arg
; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
12365 /* Use one of the operands; the target can have a different mode for
12366 mask-generating compares. */
12367 enum machine_mode mode
;
12373 mode
= insn_data
[d
->icode
].operand
[1].mode
;
12378 type
= v8qi_ftype_v8qi_v8qi
;
12381 type
= v4hi_ftype_v4hi_v4hi
;
12384 type
= v2si_ftype_v2si_v2si
;
12387 type
= di_ftype_di_di
;
12391 gcc_unreachable ();
12394 def_mbuiltin (d
->mask
, d
->name
, type
, d
->code
);
12397 /* Add the remaining MMX insns with somewhat more complicated types. */
12398 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wzero", di_ftype_void
, ARM_BUILTIN_WZERO
);
12399 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_setwcx", void_ftype_int_int
, ARM_BUILTIN_SETWCX
);
12400 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_getwcx", int_ftype_int
, ARM_BUILTIN_GETWCX
);
12402 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WSLLH
);
12403 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllw", v2si_ftype_v2si_di
, ARM_BUILTIN_WSLLW
);
12404 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wslld", di_ftype_di_di
, ARM_BUILTIN_WSLLD
);
12405 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSLLHI
);
12406 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsllwi", v2si_ftype_v2si_int
, ARM_BUILTIN_WSLLWI
);
12407 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wslldi", di_ftype_di_int
, ARM_BUILTIN_WSLLDI
);
12409 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WSRLH
);
12410 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlw", v2si_ftype_v2si_di
, ARM_BUILTIN_WSRLW
);
12411 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrld", di_ftype_di_di
, ARM_BUILTIN_WSRLD
);
12412 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSRLHI
);
12413 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int
, ARM_BUILTIN_WSRLWI
);
12414 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrldi", di_ftype_di_int
, ARM_BUILTIN_WSRLDI
);
12416 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WSRAH
);
12417 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsraw", v2si_ftype_v2si_di
, ARM_BUILTIN_WSRAW
);
12418 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrad", di_ftype_di_di
, ARM_BUILTIN_WSRAD
);
12419 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSRAHI
);
12420 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsrawi", v2si_ftype_v2si_int
, ARM_BUILTIN_WSRAWI
);
12421 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsradi", di_ftype_di_int
, ARM_BUILTIN_WSRADI
);
12423 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di
, ARM_BUILTIN_WRORH
);
12424 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorw", v2si_ftype_v2si_di
, ARM_BUILTIN_WRORW
);
12425 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrord", di_ftype_di_di
, ARM_BUILTIN_WRORD
);
12426 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WRORHI
);
12427 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrorwi", v2si_ftype_v2si_int
, ARM_BUILTIN_WRORWI
);
12428 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wrordi", di_ftype_di_int
, ARM_BUILTIN_WRORDI
);
12430 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int
, ARM_BUILTIN_WSHUFH
);
12432 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi
, ARM_BUILTIN_WSADB
);
12433 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi
, ARM_BUILTIN_WSADH
);
12434 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi
, ARM_BUILTIN_WSADBZ
);
12435 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi
, ARM_BUILTIN_WSADHZ
);
12437 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmsb", int_ftype_v8qi_int
, ARM_BUILTIN_TEXTRMSB
);
12438 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmsh", int_ftype_v4hi_int
, ARM_BUILTIN_TEXTRMSH
);
12439 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmsw", int_ftype_v2si_int
, ARM_BUILTIN_TEXTRMSW
);
12440 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmub", int_ftype_v8qi_int
, ARM_BUILTIN_TEXTRMUB
);
12441 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmuh", int_ftype_v4hi_int
, ARM_BUILTIN_TEXTRMUH
);
12442 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_textrmuw", int_ftype_v2si_int
, ARM_BUILTIN_TEXTRMUW
);
12443 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int
, ARM_BUILTIN_TINSRB
);
12444 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int
, ARM_BUILTIN_TINSRH
);
12445 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int
, ARM_BUILTIN_TINSRW
);
12447 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_waccb", di_ftype_v8qi
, ARM_BUILTIN_WACCB
);
12448 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wacch", di_ftype_v4hi
, ARM_BUILTIN_WACCH
);
12449 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_waccw", di_ftype_v2si
, ARM_BUILTIN_WACCW
);
12451 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmovmskb", int_ftype_v8qi
, ARM_BUILTIN_TMOVMSKB
);
12452 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmovmskh", int_ftype_v4hi
, ARM_BUILTIN_TMOVMSKH
);
12453 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmovmskw", int_ftype_v2si
, ARM_BUILTIN_TMOVMSKW
);
12455 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi
, ARM_BUILTIN_WPACKHSS
);
12456 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi
, ARM_BUILTIN_WPACKHUS
);
12457 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si
, ARM_BUILTIN_WPACKWUS
);
12458 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si
, ARM_BUILTIN_WPACKWSS
);
12459 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackdus", v2si_ftype_di_di
, ARM_BUILTIN_WPACKDUS
);
12460 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wpackdss", v2si_ftype_di_di
, ARM_BUILTIN_WPACKDSS
);
12462 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKEHUB
);
12463 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKEHUH
);
12464 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehuw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKEHUW
);
12465 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKEHSB
);
12466 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKEHSH
);
12467 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckehsw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKEHSW
);
12468 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKELUB
);
12469 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKELUH
);
12470 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckeluw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKELUW
);
12471 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi
, ARM_BUILTIN_WUNPCKELSB
);
12472 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi
, ARM_BUILTIN_WUNPCKELSH
);
12473 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wunpckelsw", di_ftype_v2si
, ARM_BUILTIN_WUNPCKELSW
);
12475 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi
, ARM_BUILTIN_WMACS
);
12476 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi
, ARM_BUILTIN_WMACSZ
);
12477 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi
, ARM_BUILTIN_WMACU
);
12478 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi
, ARM_BUILTIN_WMACUZ
);
12480 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int
, ARM_BUILTIN_WALIGN
);
12481 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmia", di_ftype_di_int_int
, ARM_BUILTIN_TMIA
);
12482 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiaph", di_ftype_di_int_int
, ARM_BUILTIN_TMIAPH
);
12483 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiabb", di_ftype_di_int_int
, ARM_BUILTIN_TMIABB
);
12484 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiabt", di_ftype_di_int_int
, ARM_BUILTIN_TMIABT
);
12485 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiatb", di_ftype_di_int_int
, ARM_BUILTIN_TMIATB
);
12486 def_mbuiltin (FL_IWMMXT
, "__builtin_arm_tmiatt", di_ftype_di_int_int
, ARM_BUILTIN_TMIATT
);
arm_init_tls_builtins (void)
  tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
  tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);

  ftype = build_function_type (ptr_type_node, void_list_node);
  lang_hooks.builtin_function ("__builtin_thread_pointer", ftype,
			       ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
			       NULL, const_nothrow);

arm_init_builtins (void)
  arm_init_tls_builtins ();

  if (TARGET_REALLY_IWMMXT)
    arm_init_iwmmxt_builtins ();
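
/* Illustrative note (added comment, not in the original): the TLS builtin
   registered above is what makes

	void *tp = __builtin_thread_pointer ();

   available on this target; arm_expand_builtin routes it to arm_load_tp,
   which obtains the thread pointer in a target-dependent way (for example
   via a helper call or a coprocessor register read, depending on how the
   target is configured).  */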
/* Errors in the source file can cause expand_expr to return const0_rtx
   where we expect a vector.  To avoid crashing, use one of the vector
   clear instructions.  */
safe_vector_operand (rtx x, enum machine_mode mode)
  if (x != const0_rtx)

  x = gen_reg_rtx (mode);

  emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
			       : gen_rtx_SUBREG (DImode, x, 0)));
/* Subroutine of arm_expand_builtin to take care of binop insns.  */
arm_expand_binop_builtin (enum insn_code icode,
			  tree arglist, rtx target)
  tree arg0 = TREE_VALUE (arglist);
  tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (VECTOR_MODE_P (mode0))
    op0 = safe_vector_operand (op0, mode0);
  if (VECTOR_MODE_P (mode1))
    op1 = safe_vector_operand (op1, mode1);

      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
/* Subroutine of arm_expand_builtin to take care of unop insns.  */
arm_expand_unop_builtin (enum insn_code icode,
			 tree arglist, rtx target, int do_load)
  tree arg0 = TREE_VALUE (arglist);
  rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

      if (VECTOR_MODE_P (mode0))
	op0 = safe_vector_operand (op0, mode0);

      if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
12600 /* Expand an expression EXP that calls a built-in function,
12601 with result going to TARGET if that's convenient
12602 (and in mode MODE if that's convenient).
12603 SUBTARGET may be used as the target for computing one of EXP's operands.
12604 IGNORE is nonzero if the value is to be ignored. */
12607 arm_expand_builtin (tree exp
,
12609 rtx subtarget ATTRIBUTE_UNUSED
,
12610 enum machine_mode mode ATTRIBUTE_UNUSED
,
12611 int ignore ATTRIBUTE_UNUSED
)
12613 const struct builtin_description
* d
;
12614 enum insn_code icode
;
12615 tree fndecl
= TREE_OPERAND (TREE_OPERAND (exp
, 0), 0);
12616 tree arglist
= TREE_OPERAND (exp
, 1);
12624 int fcode
= DECL_FUNCTION_CODE (fndecl
);
12626 enum machine_mode tmode
;
12627 enum machine_mode mode0
;
12628 enum machine_mode mode1
;
12629 enum machine_mode mode2
;
12633 case ARM_BUILTIN_TEXTRMSB
:
12634 case ARM_BUILTIN_TEXTRMUB
:
12635 case ARM_BUILTIN_TEXTRMSH
:
12636 case ARM_BUILTIN_TEXTRMUH
:
12637 case ARM_BUILTIN_TEXTRMSW
:
12638 case ARM_BUILTIN_TEXTRMUW
:
12639 icode
= (fcode
== ARM_BUILTIN_TEXTRMSB
? CODE_FOR_iwmmxt_textrmsb
12640 : fcode
== ARM_BUILTIN_TEXTRMUB
? CODE_FOR_iwmmxt_textrmub
12641 : fcode
== ARM_BUILTIN_TEXTRMSH
? CODE_FOR_iwmmxt_textrmsh
12642 : fcode
== ARM_BUILTIN_TEXTRMUH
? CODE_FOR_iwmmxt_textrmuh
12643 : CODE_FOR_iwmmxt_textrmw
);
12645 arg0
= TREE_VALUE (arglist
);
12646 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12647 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12648 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12649 tmode
= insn_data
[icode
].operand
[0].mode
;
12650 mode0
= insn_data
[icode
].operand
[1].mode
;
12651 mode1
= insn_data
[icode
].operand
[2].mode
;
12653 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12654 op0
= copy_to_mode_reg (mode0
, op0
);
12655 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12657 /* @@@ better error message */
12658 error ("selector must be an immediate");
12659 return gen_reg_rtx (tmode
);
12662 || GET_MODE (target
) != tmode
12663 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12664 target
= gen_reg_rtx (tmode
);
12665 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
12671 case ARM_BUILTIN_TINSRB
:
12672 case ARM_BUILTIN_TINSRH
:
12673 case ARM_BUILTIN_TINSRW
:
12674 icode
= (fcode
== ARM_BUILTIN_TINSRB
? CODE_FOR_iwmmxt_tinsrb
12675 : fcode
== ARM_BUILTIN_TINSRH
? CODE_FOR_iwmmxt_tinsrh
12676 : CODE_FOR_iwmmxt_tinsrw
);
12677 arg0
= TREE_VALUE (arglist
);
12678 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12679 arg2
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist
)));
12680 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12681 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12682 op2
= expand_expr (arg2
, NULL_RTX
, VOIDmode
, 0);
12683 tmode
= insn_data
[icode
].operand
[0].mode
;
12684 mode0
= insn_data
[icode
].operand
[1].mode
;
12685 mode1
= insn_data
[icode
].operand
[2].mode
;
12686 mode2
= insn_data
[icode
].operand
[3].mode
;
12688 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12689 op0
= copy_to_mode_reg (mode0
, op0
);
12690 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12691 op1
= copy_to_mode_reg (mode1
, op1
);
12692 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
12694 /* @@@ better error message */
12695 error ("selector must be an immediate");
12699 || GET_MODE (target
) != tmode
12700 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12701 target
= gen_reg_rtx (tmode
);
12702 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
12708 case ARM_BUILTIN_SETWCX
:
12709 arg0
= TREE_VALUE (arglist
);
12710 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12711 op0
= force_reg (SImode
, expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0));
12712 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12713 emit_insn (gen_iwmmxt_tmcr (op1
, op0
));
12716 case ARM_BUILTIN_GETWCX
:
12717 arg0
= TREE_VALUE (arglist
);
12718 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12719 target
= gen_reg_rtx (SImode
);
12720 emit_insn (gen_iwmmxt_tmrc (target
, op0
));
12723 case ARM_BUILTIN_WSHUFH
:
12724 icode
= CODE_FOR_iwmmxt_wshufh
;
12725 arg0
= TREE_VALUE (arglist
);
12726 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12727 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12728 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12729 tmode
= insn_data
[icode
].operand
[0].mode
;
12730 mode1
= insn_data
[icode
].operand
[1].mode
;
12731 mode2
= insn_data
[icode
].operand
[2].mode
;
12733 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode1
))
12734 op0
= copy_to_mode_reg (mode1
, op0
);
12735 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode2
))
12737 /* @@@ better error message */
12738 error ("mask must be an immediate");
12742 || GET_MODE (target
) != tmode
12743 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12744 target
= gen_reg_rtx (tmode
);
12745 pat
= GEN_FCN (icode
) (target
, op0
, op1
);
12751 case ARM_BUILTIN_WSADB
:
12752 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb
, arglist
, target
);
12753 case ARM_BUILTIN_WSADH
:
12754 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh
, arglist
, target
);
12755 case ARM_BUILTIN_WSADBZ
:
12756 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz
, arglist
, target
);
12757 case ARM_BUILTIN_WSADHZ
:
12758 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz
, arglist
, target
);
12760 /* Several three-argument builtins. */
12761 case ARM_BUILTIN_WMACS
:
12762 case ARM_BUILTIN_WMACU
:
12763 case ARM_BUILTIN_WALIGN
:
12764 case ARM_BUILTIN_TMIA
:
12765 case ARM_BUILTIN_TMIAPH
:
12766 case ARM_BUILTIN_TMIATT
:
12767 case ARM_BUILTIN_TMIATB
:
12768 case ARM_BUILTIN_TMIABT
:
12769 case ARM_BUILTIN_TMIABB
:
12770 icode
= (fcode
== ARM_BUILTIN_WMACS
? CODE_FOR_iwmmxt_wmacs
12771 : fcode
== ARM_BUILTIN_WMACU
? CODE_FOR_iwmmxt_wmacu
12772 : fcode
== ARM_BUILTIN_TMIA
? CODE_FOR_iwmmxt_tmia
12773 : fcode
== ARM_BUILTIN_TMIAPH
? CODE_FOR_iwmmxt_tmiaph
12774 : fcode
== ARM_BUILTIN_TMIABB
? CODE_FOR_iwmmxt_tmiabb
12775 : fcode
== ARM_BUILTIN_TMIABT
? CODE_FOR_iwmmxt_tmiabt
12776 : fcode
== ARM_BUILTIN_TMIATB
? CODE_FOR_iwmmxt_tmiatb
12777 : fcode
== ARM_BUILTIN_TMIATT
? CODE_FOR_iwmmxt_tmiatt
12778 : CODE_FOR_iwmmxt_walign
);
12779 arg0
= TREE_VALUE (arglist
);
12780 arg1
= TREE_VALUE (TREE_CHAIN (arglist
));
12781 arg2
= TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist
)));
12782 op0
= expand_expr (arg0
, NULL_RTX
, VOIDmode
, 0);
12783 op1
= expand_expr (arg1
, NULL_RTX
, VOIDmode
, 0);
12784 op2
= expand_expr (arg2
, NULL_RTX
, VOIDmode
, 0);
12785 tmode
= insn_data
[icode
].operand
[0].mode
;
12786 mode0
= insn_data
[icode
].operand
[1].mode
;
12787 mode1
= insn_data
[icode
].operand
[2].mode
;
12788 mode2
= insn_data
[icode
].operand
[3].mode
;
12790 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12791 op0
= copy_to_mode_reg (mode0
, op0
);
12792 if (! (*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12793 op1
= copy_to_mode_reg (mode1
, op1
);
12794 if (! (*insn_data
[icode
].operand
[3].predicate
) (op2
, mode2
))
12795 op2
= copy_to_mode_reg (mode2
, op2
);
12797 || GET_MODE (target
) != tmode
12798 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12799 target
= gen_reg_rtx (tmode
);
12800 pat
= GEN_FCN (icode
) (target
, op0
, op1
, op2
);
12806 case ARM_BUILTIN_WZERO
:
12807 target
= gen_reg_rtx (DImode
);
12808 emit_insn (gen_iwmmxt_clrdi (target
));
12811 case ARM_BUILTIN_THREAD_POINTER
:
12812 return arm_load_tp (target
);
12818 for (i
= 0, d
= bdesc_2arg
; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
12819 if (d
->code
== (const enum arm_builtins
) fcode
)
12820 return arm_expand_binop_builtin (d
->icode
, arglist
, target
);
12822 for (i
= 0, d
= bdesc_1arg
; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
12823 if (d
->code
== (const enum arm_builtins
) fcode
)
12824 return arm_expand_unop_builtin (d
->icode
, arglist
, target
, 0);
12826 /* @@@ Should really do something sensible here. */
/* Return the number (counting from 0) of
   the least significant set bit in MASK.  */
number_of_first_bit_set (unsigned mask)
       (mask & (1 << bit)) == 0;
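
/* Worked example (added comment): number_of_first_bit_set (0x28) returns 3,
   since bit 3 is the lowest bit set in binary 101000; the loop above simply
   counts trailing zero bits of a non-zero mask.  */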
/* Emit code to push or pop registers to or from the stack.  F is the
   assembly file.  MASK is the registers to push or pop.  PUSH is
   nonzero if we should push, and zero if we should pop.  For debugging
   output, if pushing, adjust CFA_OFFSET by the amount of space added
   to the stack.  REAL_REGS should have the same number of bits set as
   MASK, and will be used instead (in the same order) to describe which
   registers were saved - this is used to mark the save slots when we
   push high registers after moving them to low registers.  */
thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
	       unsigned long real_regs)
  int lo_mask = mask & 0xFF;
  int pushed_words = 0;

  if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
      /* Special case.  Do not generate a POP PC statement here, do it in
	 thumb_exit instead.  */
      thumb_exit (f, -1);
12872 if (ARM_EABI_UNWIND_TABLES
&& push
)
12874 fprintf (f
, "\t.save\t{");
12875 for (regno
= 0; regno
< 15; regno
++)
12877 if (real_regs
& (1 << regno
))
12879 if (real_regs
& ((1 << regno
) -1))
12881 asm_fprintf (f
, "%r", regno
);
12884 fprintf (f
, "}\n");
12887 fprintf (f
, "\t%s\t{", push
? "push" : "pop");
12889 /* Look at the low registers first. */
12890 for (regno
= 0; regno
<= LAST_LO_REGNUM
; regno
++, lo_mask
>>= 1)
12894 asm_fprintf (f
, "%r", regno
);
12896 if ((lo_mask
& ~1) != 0)
12903 if (push
&& (mask
& (1 << LR_REGNUM
)))
12905 /* Catch pushing the LR. */
12909 asm_fprintf (f
, "%r", LR_REGNUM
);
  else if (!push && (mask & (1 << PC_REGNUM)))
      /* Catch popping the PC.  */
      if (TARGET_INTERWORK || TARGET_BACKTRACE
	  || current_function_calls_eh_return)
	  /* The PC is never popped directly, instead
	     it is popped into r3 and then BX is used.  */
	  fprintf (f, "}\n");

	  thumb_exit (f, -1);
12932 asm_fprintf (f
, "%r", PC_REGNUM
);
12936 fprintf (f
, "}\n");
12938 if (push
&& pushed_words
&& dwarf2out_do_frame ())
12940 char *l
= dwarf2out_cfi_label ();
12941 int pushed_mask
= real_regs
;
12943 *cfa_offset
+= pushed_words
* 4;
12944 dwarf2out_def_cfa (l
, SP_REGNUM
, *cfa_offset
);
12947 pushed_mask
= real_regs
;
12948 for (regno
= 0; regno
<= 14; regno
++, pushed_mask
>>= 1)
12950 if (pushed_mask
& 1)
12951 dwarf2out_reg_save (l
, regno
, 4 * pushed_words
++ - *cfa_offset
);
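
/* Illustrative sketch (added comment, not in the original): for a push of
   {r4, r5, lr} with EABI unwind tables enabled, the routine above emits
   roughly

	.save	{r4, r5, lr}
	push	{r4, r5, lr}

   and, when dwarf2 frame output is wanted, adjusts the CFA by the three
   pushed words and records a save slot for each register in REAL_REGS.  */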
12956 /* Generate code to return from a thumb function.
12957 If 'reg_containing_return_addr' is -1, then the return address is
12958 actually on the stack, at the stack pointer. */
12960 thumb_exit (FILE *f
, int reg_containing_return_addr
)
12962 unsigned regs_available_for_popping
;
12963 unsigned regs_to_pop
;
12965 unsigned available
;
12969 int restore_a4
= FALSE
;
12971 /* Compute the registers we need to pop. */
12975 if (reg_containing_return_addr
== -1)
12977 regs_to_pop
|= 1 << LR_REGNUM
;
12981 if (TARGET_BACKTRACE
)
12983 /* Restore the (ARM) frame pointer and stack pointer. */
12984 regs_to_pop
|= (1 << ARM_HARD_FRAME_POINTER_REGNUM
) | (1 << SP_REGNUM
);
12988 /* If there is nothing to pop then just emit the BX instruction and
12990 if (pops_needed
== 0)
12992 if (current_function_calls_eh_return
)
12993 asm_fprintf (f
, "\tadd\t%r, %r\n", SP_REGNUM
, ARM_EH_STACKADJ_REGNUM
);
12995 asm_fprintf (f
, "\tbx\t%r\n", reg_containing_return_addr
);
12998 /* Otherwise if we are not supporting interworking and we have not created
12999 a backtrace structure and the function was not entered in ARM mode then
13000 just pop the return address straight into the PC. */
13001 else if (!TARGET_INTERWORK
13002 && !TARGET_BACKTRACE
13003 && !is_called_in_ARM_mode (current_function_decl
)
13004 && !current_function_calls_eh_return
)
13006 asm_fprintf (f
, "\tpop\t{%r}\n", PC_REGNUM
);
13010 /* Find out how many of the (return) argument registers we can corrupt. */
13011 regs_available_for_popping
= 0;
13013 /* If returning via __builtin_eh_return, the bottom three registers
13014 all contain information needed for the return. */
13015 if (current_function_calls_eh_return
)
      /* See if we can deduce the registers used from the function's
	 return value.  This is more reliable than examining
	 regs_ever_live[] because that will be set if the register is
	 ever used in the function, not just if the register is used
	 to hold a return value.  */
13025 if (current_function_return_rtx
!= 0)
13026 mode
= GET_MODE (current_function_return_rtx
);
13028 mode
= DECL_MODE (DECL_RESULT (current_function_decl
));
13030 size
= GET_MODE_SIZE (mode
);
13034 /* In a void function we can use any argument register.
13035 In a function that returns a structure on the stack
13036 we can use the second and third argument registers. */
13037 if (mode
== VOIDmode
)
13038 regs_available_for_popping
=
13039 (1 << ARG_REGISTER (1))
13040 | (1 << ARG_REGISTER (2))
13041 | (1 << ARG_REGISTER (3));
13043 regs_available_for_popping
=
13044 (1 << ARG_REGISTER (2))
13045 | (1 << ARG_REGISTER (3));
13047 else if (size
<= 4)
13048 regs_available_for_popping
=
13049 (1 << ARG_REGISTER (2))
13050 | (1 << ARG_REGISTER (3));
13051 else if (size
<= 8)
13052 regs_available_for_popping
=
13053 (1 << ARG_REGISTER (3));
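      /* Editor's illustration (not from the original source): a function
	 returning a 4-byte int keeps its value in r0, so r1 and r2 are free
	 to pop into; an 8-byte return occupies r0 and r1, leaving only r2;
	 larger return values leave no argument register available from this
	 block, and r3 (a4) is dealt with separately below.  */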
13056 /* Match registers to be popped with registers into which we pop them. */
13057 for (available
= regs_available_for_popping
,
13058 required
= regs_to_pop
;
13059 required
!= 0 && available
!= 0;
13060 available
&= ~(available
& - available
),
13061 required
&= ~(required
& - required
))
13064 /* If we have any popping registers left over, remove them. */
13066 regs_available_for_popping
&= ~available
;
13068 /* Otherwise if we need another popping register we can use
13069 the fourth argument register. */
13070 else if (pops_needed
)
13072 /* If we have not found any free argument registers and
13073 reg a4 contains the return address, we must move it. */
13074 if (regs_available_for_popping
== 0
13075 && reg_containing_return_addr
== LAST_ARG_REGNUM
)
13077 asm_fprintf (f
, "\tmov\t%r, %r\n", LR_REGNUM
, LAST_ARG_REGNUM
);
13078 reg_containing_return_addr
= LR_REGNUM
;
13080 else if (size
> 12)
13082 /* Register a4 is being used to hold part of the return value,
13083 but we have dire need of a free, low register. */
13086 asm_fprintf (f
, "\tmov\t%r, %r\n",IP_REGNUM
, LAST_ARG_REGNUM
);
13089 if (reg_containing_return_addr
!= LAST_ARG_REGNUM
)
13091 /* The fourth argument register is available. */
13092 regs_available_for_popping
|= 1 << LAST_ARG_REGNUM
;
13098 /* Pop as many registers as we can. */
13099 thumb_pushpop (f
, regs_available_for_popping
, FALSE
, NULL
,
13100 regs_available_for_popping
);
13102 /* Process the registers we popped. */
13103 if (reg_containing_return_addr
== -1)
13105 /* The return address was popped into the lowest numbered register. */
13106 regs_to_pop
&= ~(1 << LR_REGNUM
);
13108 reg_containing_return_addr
=
13109 number_of_first_bit_set (regs_available_for_popping
);
13111 /* Remove this register for the mask of available registers, so that
13112 the return address will not be corrupted by further pops. */
13113 regs_available_for_popping
&= ~(1 << reg_containing_return_addr
);
13116 /* If we popped other registers then handle them here. */
13117 if (regs_available_for_popping
)
13121 /* Work out which register currently contains the frame pointer. */
13122 frame_pointer
= number_of_first_bit_set (regs_available_for_popping
);
13124 /* Move it into the correct place. */
13125 asm_fprintf (f
, "\tmov\t%r, %r\n",
13126 ARM_HARD_FRAME_POINTER_REGNUM
, frame_pointer
);
13128 /* (Temporarily) remove it from the mask of popped registers. */
13129 regs_available_for_popping
&= ~(1 << frame_pointer
);
13130 regs_to_pop
&= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM
);
13132 if (regs_available_for_popping
)
13136 /* We popped the stack pointer as well,
13137 find the register that contains it. */
13138 stack_pointer
= number_of_first_bit_set (regs_available_for_popping
);
13140 /* Move it into the stack register. */
13141 asm_fprintf (f
, "\tmov\t%r, %r\n", SP_REGNUM
, stack_pointer
);
13143 /* At this point we have popped all necessary registers, so
13144 do not worry about restoring regs_available_for_popping
13145 to its correct value:
13147 assert (pops_needed == 0)
13148 assert (regs_available_for_popping == (1 << frame_pointer))
13149 assert (regs_to_pop == (1 << STACK_POINTER)) */
	  /* Since we have just moved the popped value into the frame
	     pointer, the popping register is available for reuse, and
	     we know that we still have the stack pointer left to pop.  */
13156 regs_available_for_popping
|= (1 << frame_pointer
);
  /* If we still have registers left on the stack, but we no longer have
     any registers into which we can pop them, then we must move the return
     address into the link register and make available the register that
     contained it.  */
13164 if (regs_available_for_popping
== 0 && pops_needed
> 0)
13166 regs_available_for_popping
|= 1 << reg_containing_return_addr
;
13168 asm_fprintf (f
, "\tmov\t%r, %r\n", LR_REGNUM
,
13169 reg_containing_return_addr
);
13171 reg_containing_return_addr
= LR_REGNUM
;
13174 /* If we have registers left on the stack then pop some more.
13175 We know that at most we will want to pop FP and SP. */
13176 if (pops_needed
> 0)
13181 thumb_pushpop (f
, regs_available_for_popping
, FALSE
, NULL
,
13182 regs_available_for_popping
);
13184 /* We have popped either FP or SP.
13185 Move whichever one it is into the correct register. */
13186 popped_into
= number_of_first_bit_set (regs_available_for_popping
);
13187 move_to
= number_of_first_bit_set (regs_to_pop
);
13189 asm_fprintf (f
, "\tmov\t%r, %r\n", move_to
, popped_into
);
13191 regs_to_pop
&= ~(1 << move_to
);
13196 /* If we still have not popped everything then we must have only
13197 had one register available to us and we are now popping the SP. */
13198 if (pops_needed
> 0)
13202 thumb_pushpop (f
, regs_available_for_popping
, FALSE
, NULL
,
13203 regs_available_for_popping
);
13205 popped_into
= number_of_first_bit_set (regs_available_for_popping
);
13207 asm_fprintf (f
, "\tmov\t%r, %r\n", SP_REGNUM
, popped_into
);
13209 assert (regs_to_pop == (1 << STACK_POINTER))
13210 assert (pops_needed == 1)
13214 /* If necessary restore the a4 register. */
13217 if (reg_containing_return_addr
!= LR_REGNUM
)
13219 asm_fprintf (f
, "\tmov\t%r, %r\n", LR_REGNUM
, LAST_ARG_REGNUM
);
13220 reg_containing_return_addr
= LR_REGNUM
;
13223 asm_fprintf (f
, "\tmov\t%r, %r\n", LAST_ARG_REGNUM
, IP_REGNUM
);
13226 if (current_function_calls_eh_return
)
13227 asm_fprintf (f
, "\tadd\t%r, %r\n", SP_REGNUM
, ARM_EH_STACKADJ_REGNUM
);
13229 /* Return to caller. */
13230 asm_fprintf (f
, "\tbx\t%r\n", reg_containing_return_addr
);
void
thumb_final_prescan_insn (rtx insn)
{
  if (flag_print_asm_name)
    asm_fprintf (asm_out_file, "%@ 0x%04x\n",
		 INSN_ADDRESSES (INSN_UID (insn)));
}
13243 thumb_shiftable_const (unsigned HOST_WIDE_INT val
)
13245 unsigned HOST_WIDE_INT mask
= 0xff;
13248 if (val
== 0) /* XXX */
13251 for (i
= 0; i
< 25; i
++)
13252 if ((val
& (mask
<< i
)) == val
)
13258 /* Returns nonzero if the current function contains,
13259 or might contain a far jump. */
13261 thumb_far_jump_used_p (void)
13265 /* This test is only important for leaf functions. */
13266 /* assert (!leaf_function_p ()); */
13268 /* If we have already decided that far jumps may be used,
13269 do not bother checking again, and always return true even if
13270 it turns out that they are not being used. Once we have made
13271 the decision that far jumps are present (and that hence the link
13272 register will be pushed onto the stack) we cannot go back on it. */
13273 if (cfun
->machine
->far_jump_used
)
13276 /* If this function is not being called from the prologue/epilogue
13277 generation code then it must be being called from the
13278 INITIAL_ELIMINATION_OFFSET macro. */
13279 if (!(ARM_DOUBLEWORD_ALIGN
|| reload_completed
))
13281 /* In this case we know that we are being asked about the elimination
13282 of the arg pointer register. If that register is not being used,
13283 then there are no arguments on the stack, and we do not have to
13284 worry that a far jump might force the prologue to push the link
13285 register, changing the stack offsets. In this case we can just
13286 return false, since the presence of far jumps in the function will
13287 not affect stack offsets.
13289 If the arg pointer is live (or if it was live, but has now been
13290 eliminated and so set to dead) then we do have to test to see if
13291 the function might contain a far jump. This test can lead to some
	 false negatives, since before reload is completed the length of
13293 branch instructions is not known, so gcc defaults to returning their
13294 longest length, which in turn sets the far jump attribute to true.
13296 A false negative will not result in bad code being generated, but it
13297 will result in a needless push and pop of the link register. We
13298 hope that this does not occur too often.
13300 If we need doubleword stack alignment this could affect the other
13301 elimination offsets so we can't risk getting it wrong. */
13302 if (regs_ever_live
[ARG_POINTER_REGNUM
])
13303 cfun
->machine
->arg_pointer_live
= 1;
13304 else if (!cfun
->machine
->arg_pointer_live
)
13308 /* Check to see if the function contains a branch
13309 insn with the far jump attribute set. */
13310 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
13312 if (GET_CODE (insn
) == JUMP_INSN
13313 /* Ignore tablejump patterns. */
13314 && GET_CODE (PATTERN (insn
)) != ADDR_VEC
13315 && GET_CODE (PATTERN (insn
)) != ADDR_DIFF_VEC
13316 && get_attr_far_jump (insn
) == FAR_JUMP_YES
13319 /* Record the fact that we have decided that
13320 the function does use far jumps. */
13321 cfun
->machine
->far_jump_used
= 1;
/* Return nonzero if FUNC must be entered in ARM mode.  */
int
is_called_in_ARM_mode (tree func)
{
  gcc_assert (TREE_CODE (func) == FUNCTION_DECL);

  /* Ignore the problem about functions whose address is taken.  */
  if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
    return TRUE;

#ifdef ARM_PE
  return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
#else
  return FALSE;
#endif
}
13346 /* The bits which aren't usefully expanded as rtl. */
13348 thumb_unexpanded_epilogue (void)
13351 unsigned long live_regs_mask
= 0;
13352 int high_regs_pushed
= 0;
13353 int had_to_push_lr
;
13356 if (return_used_this_function
)
13359 if (IS_NAKED (arm_current_func_type ()))
13362 live_regs_mask
= thumb_compute_save_reg_mask ();
13363 high_regs_pushed
= bit_count (live_regs_mask
& 0x0f00);
  /* We can deduce the registers used from the function's return value.
     This is more reliable than examining regs_ever_live[] because that
     will be set if the register is ever used in the function, not just if
     the register is used to hold a return value.  */
13369 size
= arm_size_return_regs ();
13371 /* The prolog may have pushed some high registers to use as
13372 work registers. e.g. the testsuite file:
13373 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
13374 compiles to produce:
13375 push {r4, r5, r6, r7, lr}
13379 as part of the prolog. We have to undo that pushing here. */
13381 if (high_regs_pushed
)
13383 unsigned long mask
= live_regs_mask
& 0xff;
      /* The available low registers depend on the size of the value we are
	 returning.  */
13394 /* Oh dear! We have no low registers into which we can pop
13397 ("no low registers available for popping high registers");
13399 for (next_hi_reg
= 8; next_hi_reg
< 13; next_hi_reg
++)
13400 if (live_regs_mask
& (1 << next_hi_reg
))
13403 while (high_regs_pushed
)
	  /* Find lo register(s) into which the high register(s) can
	     be popped.  */
13407 for (regno
= 0; regno
<= LAST_LO_REGNUM
; regno
++)
13409 if (mask
& (1 << regno
))
13410 high_regs_pushed
--;
13411 if (high_regs_pushed
== 0)
13415 mask
&= (2 << regno
) - 1; /* A noop if regno == 8 */
13417 /* Pop the values into the low register(s). */
13418 thumb_pushpop (asm_out_file
, mask
, 0, NULL
, mask
);
13420 /* Move the value(s) into the high registers. */
13421 for (regno
= 0; regno
<= LAST_LO_REGNUM
; regno
++)
13423 if (mask
& (1 << regno
))
13425 asm_fprintf (asm_out_file
, "\tmov\t%r, %r\n", next_hi_reg
,
13428 for (next_hi_reg
++; next_hi_reg
< 13; next_hi_reg
++)
13429 if (live_regs_mask
& (1 << next_hi_reg
))
13434 live_regs_mask
&= ~0x0f00;
13437 had_to_push_lr
= (live_regs_mask
& (1 << LR_REGNUM
)) != 0;
13438 live_regs_mask
&= 0xff;
13440 if (current_function_pretend_args_size
== 0 || TARGET_BACKTRACE
)
13442 /* Pop the return address into the PC. */
13443 if (had_to_push_lr
)
13444 live_regs_mask
|= 1 << PC_REGNUM
;
13446 /* Either no argument registers were pushed or a backtrace
13447 structure was created which includes an adjusted stack
13448 pointer, so just pop everything. */
13449 if (live_regs_mask
)
13450 thumb_pushpop (asm_out_file
, live_regs_mask
, FALSE
, NULL
,
      /* We have either just popped the return address into the
	 PC or it was kept in LR for the entire function.  */
13455 if (!had_to_push_lr
)
13456 thumb_exit (asm_out_file
, LR_REGNUM
);
13460 /* Pop everything but the return address. */
13461 if (live_regs_mask
)
13462 thumb_pushpop (asm_out_file
, live_regs_mask
, FALSE
, NULL
,
13465 if (had_to_push_lr
)
13469 /* We have no free low regs, so save one. */
13470 asm_fprintf (asm_out_file
, "\tmov\t%r, %r\n", IP_REGNUM
,
13474 /* Get the return address into a temporary register. */
13475 thumb_pushpop (asm_out_file
, 1 << LAST_ARG_REGNUM
, 0, NULL
,
13476 1 << LAST_ARG_REGNUM
);
13480 /* Move the return address to lr. */
13481 asm_fprintf (asm_out_file
, "\tmov\t%r, %r\n", LR_REGNUM
,
13483 /* Restore the low register. */
13484 asm_fprintf (asm_out_file
, "\tmov\t%r, %r\n", LAST_ARG_REGNUM
,
13489 regno
= LAST_ARG_REGNUM
;
13494 /* Remove the argument registers that were pushed onto the stack. */
13495 asm_fprintf (asm_out_file
, "\tadd\t%r, %r, #%d\n",
13496 SP_REGNUM
, SP_REGNUM
,
13497 current_function_pretend_args_size
);
13499 thumb_exit (asm_out_file
, regno
);
/* Functions to save and restore machine-specific function data.  */
static struct machine_function *
arm_init_machine_status (void)
{
  struct machine_function *machine;
  machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));

#if ARM_FT_UNKNOWN != 0
  machine->func_type = ARM_FT_UNKNOWN;
#endif

  return machine;
}
/* Return an RTX indicating where the return address to the
   calling function can be found.  */
rtx
arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return NULL_RTX;

  return get_hard_reg_initial_val (Pmode, LR_REGNUM);
}
/* Do anything needed before RTL is emitted for each function.  */
void
arm_init_expanders (void)
{
  /* Arrange to initialize and mark the machine per-function status.  */
  init_machine_status = arm_init_machine_status;

  /* This is to stop the combine pass optimizing away the alignment
     adjustment of va_arg.  */
  /* ??? It is claimed that this should not be necessary.  */
  if (cfun)
    mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
}
/* Like arm_compute_initial_elimination_offset.  Simpler because there
13545 isn't an ABI specified frame pointer for Thumb. Instead, we set it
13546 to point at the base of the local variables after static stack
13547 space for a function has been allocated. */
13550 thumb_compute_initial_elimination_offset (unsigned int from
, unsigned int to
)
13552 arm_stack_offsets
*offsets
;
13554 offsets
= arm_get_frame_offsets ();
13558 case ARG_POINTER_REGNUM
:
13561 case STACK_POINTER_REGNUM
:
13562 return offsets
->outgoing_args
- offsets
->saved_args
;
13564 case FRAME_POINTER_REGNUM
:
13565 return offsets
->soft_frame
- offsets
->saved_args
;
13567 case ARM_HARD_FRAME_POINTER_REGNUM
:
13568 return offsets
->saved_regs
- offsets
->saved_args
;
13570 case THUMB_HARD_FRAME_POINTER_REGNUM
:
13571 return offsets
->locals_base
- offsets
->saved_args
;
13574 gcc_unreachable ();
13578 case FRAME_POINTER_REGNUM
:
13581 case STACK_POINTER_REGNUM
:
13582 return offsets
->outgoing_args
- offsets
->soft_frame
;
13584 case ARM_HARD_FRAME_POINTER_REGNUM
:
13585 return offsets
->saved_regs
- offsets
->soft_frame
;
13587 case THUMB_HARD_FRAME_POINTER_REGNUM
:
13588 return offsets
->locals_base
- offsets
->soft_frame
;
13591 gcc_unreachable ();
13596 gcc_unreachable ();
13601 /* Generate the rest of a function's prologue. */
13603 thumb_expand_prologue (void)
13607 HOST_WIDE_INT amount
;
13608 arm_stack_offsets
*offsets
;
13609 unsigned long func_type
;
13611 unsigned long live_regs_mask
;
13613 func_type
= arm_current_func_type ();
13615 /* Naked functions don't have prologues. */
13616 if (IS_NAKED (func_type
))
13619 if (IS_INTERRUPT (func_type
))
13621 error ("interrupt Service Routines cannot be coded in Thumb mode");
13625 live_regs_mask
= thumb_compute_save_reg_mask ();
13626 /* Load the pic register before setting the frame pointer,
13627 so we can use r7 as a temporary work register. */
13629 arm_load_pic_register (live_regs_mask
);
13631 if (!frame_pointer_needed
&& CALLER_INTERWORKING_SLOT_SIZE
> 0)
13632 emit_move_insn (gen_rtx_REG (Pmode
, ARM_HARD_FRAME_POINTER_REGNUM
),
13633 stack_pointer_rtx
);
13635 offsets
= arm_get_frame_offsets ();
13636 amount
= offsets
->outgoing_args
- offsets
->saved_regs
;
13641 insn
= emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
13642 GEN_INT (- amount
)));
13643 RTX_FRAME_RELATED_P (insn
) = 1;
13649 /* The stack decrement is too big for an immediate value in a single
13650 insn. In theory we could issue multiple subtracts, but after
13651 three of them it becomes more space efficient to place the full
13652 value in the constant pool and load into a register. (Also the
13653 ARM debugger really likes to see only one stack decrement per
13654 function). So instead we look for a scratch register into which
13655 we can load the decrement, and then we subtract this from the
13656 stack pointer. Unfortunately on the thumb the only available
13657 scratch registers are the argument registers, and we cannot use
13658 these as they may hold arguments to the function. Instead we
13659 attempt to locate a call preserved register which is used by this
13660 function. If we can find one, then we know that it will have
	 been pushed at the start of the prologue and so we can corrupt
	 it now.  */
13663 for (regno
= LAST_ARG_REGNUM
+ 1; regno
<= LAST_LO_REGNUM
; regno
++)
13664 if (live_regs_mask
& (1 << regno
)
13665 && !(frame_pointer_needed
13666 && (regno
== THUMB_HARD_FRAME_POINTER_REGNUM
)))
13669 if (regno
> LAST_LO_REGNUM
) /* Very unlikely. */
13671 rtx spare
= gen_rtx_REG (SImode
, IP_REGNUM
);
13673 /* Choose an arbitrary, non-argument low register. */
13674 reg
= gen_rtx_REG (SImode
, LAST_LO_REGNUM
);
13676 /* Save it by copying it into a high, scratch register. */
13677 emit_insn (gen_movsi (spare
, reg
));
13678 /* Add a USE to stop propagate_one_insn() from barfing. */
13679 emit_insn (gen_prologue_use (spare
));
13681 /* Decrement the stack. */
13682 emit_insn (gen_movsi (reg
, GEN_INT (- amount
)));
13683 insn
= emit_insn (gen_addsi3 (stack_pointer_rtx
,
13684 stack_pointer_rtx
, reg
));
13685 RTX_FRAME_RELATED_P (insn
) = 1;
13686 dwarf
= gen_rtx_SET (SImode
, stack_pointer_rtx
,
13687 plus_constant (stack_pointer_rtx
,
13689 RTX_FRAME_RELATED_P (dwarf
) = 1;
13691 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, dwarf
,
13694 /* Restore the low register's original value. */
13695 emit_insn (gen_movsi (reg
, spare
));
13697 /* Emit a USE of the restored scratch register, so that flow
13698 analysis will not consider the restore redundant. The
13699 register won't be used again in this function and isn't
13700 restored by the epilogue. */
13701 emit_insn (gen_prologue_use (reg
));
13705 reg
= gen_rtx_REG (SImode
, regno
);
13707 emit_insn (gen_movsi (reg
, GEN_INT (- amount
)));
13709 insn
= emit_insn (gen_addsi3 (stack_pointer_rtx
,
13710 stack_pointer_rtx
, reg
));
13711 RTX_FRAME_RELATED_P (insn
) = 1;
13712 dwarf
= gen_rtx_SET (SImode
, stack_pointer_rtx
,
13713 plus_constant (stack_pointer_rtx
,
13715 RTX_FRAME_RELATED_P (dwarf
) = 1;
13717 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, dwarf
,
13723 if (frame_pointer_needed
)
13725 amount
= offsets
->outgoing_args
- offsets
->locals_base
;
13728 insn
= emit_insn (gen_addsi3 (hard_frame_pointer_rtx
,
13729 stack_pointer_rtx
, GEN_INT (amount
)));
13732 emit_insn (gen_movsi (hard_frame_pointer_rtx
, GEN_INT (amount
)));
13733 insn
= emit_insn (gen_addsi3 (hard_frame_pointer_rtx
,
13734 hard_frame_pointer_rtx
,
13735 stack_pointer_rtx
));
13736 dwarf
= gen_rtx_SET (SImode
, hard_frame_pointer_rtx
,
13737 plus_constant (stack_pointer_rtx
, amount
));
13738 RTX_FRAME_RELATED_P (dwarf
) = 1;
13739 REG_NOTES (insn
) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR
, dwarf
,
13743 RTX_FRAME_RELATED_P (insn
) = 1;
13746 /* If we are profiling, make sure no instructions are scheduled before
13747 the call to mcount. Similarly if the user has requested no
13748 scheduling in the prolog. Similarly if we want non-call exceptions
13749 using the EABI unwinder, to prevent faulting instructions from being
13750 swapped with a stack adjustment. */
13751 if (current_function_profile
|| !TARGET_SCHED_PROLOG
13752 || (ARM_EABI_UNWIND_TABLES
&& flag_non_call_exceptions
))
13753 emit_insn (gen_blockage ());
13755 cfun
->machine
->lr_save_eliminated
= !thumb_force_lr_save ();
13756 if (live_regs_mask
& 0xff)
13757 cfun
->machine
->lr_save_eliminated
= 0;
13759 /* If the link register is being kept alive, with the return address in it,
13760 then make sure that it does not get reused by the ce2 pass. */
13761 if (cfun
->machine
->lr_save_eliminated
)
13762 emit_insn (gen_prologue_use (gen_rtx_REG (SImode
, LR_REGNUM
)));
13767 thumb_expand_epilogue (void)
13769 HOST_WIDE_INT amount
;
13770 arm_stack_offsets
*offsets
;
  /* Naked functions don't have epilogues.  */
13774 if (IS_NAKED (arm_current_func_type ()))
13777 offsets
= arm_get_frame_offsets ();
13778 amount
= offsets
->outgoing_args
- offsets
->saved_regs
;
13780 if (frame_pointer_needed
)
13782 emit_insn (gen_movsi (stack_pointer_rtx
, hard_frame_pointer_rtx
));
13783 amount
= offsets
->locals_base
- offsets
->saved_regs
;
13786 gcc_assert (amount
>= 0);
13790 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
,
13791 GEN_INT (amount
)));
13794 /* r3 is always free in the epilogue. */
13795 rtx reg
= gen_rtx_REG (SImode
, LAST_ARG_REGNUM
);
13797 emit_insn (gen_movsi (reg
, GEN_INT (amount
)));
13798 emit_insn (gen_addsi3 (stack_pointer_rtx
, stack_pointer_rtx
, reg
));
13802 /* Emit a USE (stack_pointer_rtx), so that
13803 the stack adjustment will not be deleted. */
13804 emit_insn (gen_prologue_use (stack_pointer_rtx
));
13806 if (current_function_profile
|| !TARGET_SCHED_PROLOG
)
13807 emit_insn (gen_blockage ());
13809 /* Emit a clobber for each insn that will be restored in the epilogue,
13810 so that flow2 will get register lifetimes correct. */
13811 for (regno
= 0; regno
< 13; regno
++)
13812 if (regs_ever_live
[regno
] && !call_used_regs
[regno
])
13813 emit_insn (gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, regno
)));
13815 if (! regs_ever_live
[LR_REGNUM
])
13816 emit_insn (gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, LR_REGNUM
)));
13820 thumb_output_function_prologue (FILE *f
, HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
13822 unsigned long live_regs_mask
= 0;
13823 unsigned long l_mask
;
13824 unsigned high_regs_pushed
= 0;
13825 int cfa_offset
= 0;
13828 if (IS_NAKED (arm_current_func_type ()))
13831 if (is_called_in_ARM_mode (current_function_decl
))
13835 gcc_assert (GET_CODE (DECL_RTL (current_function_decl
)) == MEM
);
13836 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl
), 0))
13838 name
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
13840 /* Generate code sequence to switch us into Thumb mode. */
13841 /* The .code 32 directive has already been emitted by
13842 ASM_DECLARE_FUNCTION_NAME. */
13843 asm_fprintf (f
, "\torr\t%r, %r, #1\n", IP_REGNUM
, PC_REGNUM
);
13844 asm_fprintf (f
, "\tbx\t%r\n", IP_REGNUM
);
13846 /* Generate a label, so that the debugger will notice the
13847 change in instruction sets. This label is also used by
13848 the assembler to bypass the ARM code when this function
13849 is called from a Thumb encoded function elsewhere in the
13850 same file. Hence the definition of STUB_NAME here must
13851 agree with the definition in gas/config/tc-arm.c. */
13853 #define STUB_NAME ".real_start_of"
13855 fprintf (f
, "\t.code\t16\n");
13857 if (arm_dllexport_name_p (name
))
13858 name
= arm_strip_name_encoding (name
);
13860 asm_fprintf (f
, "\t.globl %s%U%s\n", STUB_NAME
, name
);
13861 fprintf (f
, "\t.thumb_func\n");
13862 asm_fprintf (f
, "%s%U%s:\n", STUB_NAME
, name
);
13865 if (current_function_pretend_args_size
)
13867 /* Output unwind directive for the stack adjustment. */
13868 if (ARM_EABI_UNWIND_TABLES
)
13869 fprintf (f
, "\t.pad #%d\n",
13870 current_function_pretend_args_size
);
13872 if (cfun
->machine
->uses_anonymous_args
)
13876 fprintf (f
, "\tpush\t{");
13878 num_pushes
= ARM_NUM_INTS (current_function_pretend_args_size
);
13880 for (regno
= LAST_ARG_REGNUM
+ 1 - num_pushes
;
13881 regno
<= LAST_ARG_REGNUM
;
13883 asm_fprintf (f
, "%r%s", regno
,
13884 regno
== LAST_ARG_REGNUM
? "" : ", ");
13886 fprintf (f
, "}\n");
13889 asm_fprintf (f
, "\tsub\t%r, %r, #%d\n",
13890 SP_REGNUM
, SP_REGNUM
,
13891 current_function_pretend_args_size
);
13893 /* We don't need to record the stores for unwinding (would it
13894 help the debugger any if we did?), but record the change in
13895 the stack pointer. */
13896 if (dwarf2out_do_frame ())
13898 char *l
= dwarf2out_cfi_label ();
13900 cfa_offset
= cfa_offset
+ current_function_pretend_args_size
;
13901 dwarf2out_def_cfa (l
, SP_REGNUM
, cfa_offset
);
13905 /* Get the registers we are going to push. */
13906 live_regs_mask
= thumb_compute_save_reg_mask ();
13907 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
13908 l_mask
= live_regs_mask
& 0x40ff;
13909 /* Then count how many other high registers will need to be pushed. */
13910 high_regs_pushed
= bit_count (live_regs_mask
& 0x0f00);
13912 if (TARGET_BACKTRACE
)
13915 unsigned work_register
;
13917 /* We have been asked to create a stack backtrace structure.
13918 The code looks like this:
13922 0 sub SP, #16 Reserve space for 4 registers.
13923 2 push {R7} Push low registers.
13924 4 add R7, SP, #20 Get the stack pointer before the push.
13925 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
13926 8 mov R7, PC Get hold of the start of this code plus 12.
13927 10 str R7, [SP, #16] Store it.
13928 12 mov R7, FP Get hold of the current frame pointer.
13929 14 str R7, [SP, #4] Store it.
13930 16 mov R7, LR Get hold of the current return address.
13931 18 str R7, [SP, #12] Store it.
13932 20 add R7, SP, #16 Point at the start of the backtrace structure.
13933 22 mov FP, R7 Put this value into the frame pointer. */
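      /* Editor's sketch (an assumption, not part of the original source):
	 viewed from the final frame pointer, the four words reserved above
	 form an APCS-style backtrace structure laid out roughly as:

	     [FP, #-12]  caller's frame pointer
	     [FP, #-8]   stack pointer before the structure was reserved
	     [FP, #-4]   return address (LR)
	     [FP, #0]    address of the creation code + 12 (taken from PC)  */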
13935 work_register
= thumb_find_work_register (live_regs_mask
);
13937 if (ARM_EABI_UNWIND_TABLES
)
13938 asm_fprintf (f
, "\t.pad #16\n");
13941 (f
, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
13942 SP_REGNUM
, SP_REGNUM
);
13944 if (dwarf2out_do_frame ())
13946 char *l
= dwarf2out_cfi_label ();
13948 cfa_offset
= cfa_offset
+ 16;
13949 dwarf2out_def_cfa (l
, SP_REGNUM
, cfa_offset
);
13954 thumb_pushpop (f
, l_mask
, 1, &cfa_offset
, l_mask
);
13955 offset
= bit_count (l_mask
) * UNITS_PER_WORD
;
13960 asm_fprintf (f
, "\tadd\t%r, %r, #%d\n", work_register
, SP_REGNUM
,
13961 offset
+ 16 + current_function_pretend_args_size
);
13963 asm_fprintf (f
, "\tstr\t%r, [%r, #%d]\n", work_register
, SP_REGNUM
,
13966 /* Make sure that the instruction fetching the PC is in the right place
13967 to calculate "start of backtrace creation code + 12". */
13970 asm_fprintf (f
, "\tmov\t%r, %r\n", work_register
, PC_REGNUM
);
13971 asm_fprintf (f
, "\tstr\t%r, [%r, #%d]\n", work_register
, SP_REGNUM
,
13973 asm_fprintf (f
, "\tmov\t%r, %r\n", work_register
,
13974 ARM_HARD_FRAME_POINTER_REGNUM
);
13975 asm_fprintf (f
, "\tstr\t%r, [%r, #%d]\n", work_register
, SP_REGNUM
,
13980 asm_fprintf (f
, "\tmov\t%r, %r\n", work_register
,
13981 ARM_HARD_FRAME_POINTER_REGNUM
);
13982 asm_fprintf (f
, "\tstr\t%r, [%r, #%d]\n", work_register
, SP_REGNUM
,
13984 asm_fprintf (f
, "\tmov\t%r, %r\n", work_register
, PC_REGNUM
);
13985 asm_fprintf (f
, "\tstr\t%r, [%r, #%d]\n", work_register
, SP_REGNUM
,
13989 asm_fprintf (f
, "\tmov\t%r, %r\n", work_register
, LR_REGNUM
);
13990 asm_fprintf (f
, "\tstr\t%r, [%r, #%d]\n", work_register
, SP_REGNUM
,
13992 asm_fprintf (f
, "\tadd\t%r, %r, #%d\n", work_register
, SP_REGNUM
,
13994 asm_fprintf (f
, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
13995 ARM_HARD_FRAME_POINTER_REGNUM
, work_register
);
13997 /* Optimization: If we are not pushing any low registers but we are going
13998 to push some high registers then delay our first push. This will just
     be a push of LR and we can combine it with the push of the first high
     register.  */
14001 else if ((l_mask
& 0xff) != 0
14002 || (high_regs_pushed
== 0 && l_mask
))
14003 thumb_pushpop (f
, l_mask
, 1, &cfa_offset
, l_mask
);
14005 if (high_regs_pushed
)
14007 unsigned pushable_regs
;
14008 unsigned next_hi_reg
;
14010 for (next_hi_reg
= 12; next_hi_reg
> LAST_LO_REGNUM
; next_hi_reg
--)
14011 if (live_regs_mask
& (1 << next_hi_reg
))
14014 pushable_regs
= l_mask
& 0xff;
14016 if (pushable_regs
== 0)
14017 pushable_regs
= 1 << thumb_find_work_register (live_regs_mask
);
14019 while (high_regs_pushed
> 0)
14021 unsigned long real_regs_mask
= 0;
14023 for (regno
= LAST_LO_REGNUM
; regno
>= 0; regno
--)
14025 if (pushable_regs
& (1 << regno
))
14027 asm_fprintf (f
, "\tmov\t%r, %r\n", regno
, next_hi_reg
);
14029 high_regs_pushed
--;
14030 real_regs_mask
|= (1 << next_hi_reg
);
14032 if (high_regs_pushed
)
14034 for (next_hi_reg
--; next_hi_reg
> LAST_LO_REGNUM
;
14036 if (live_regs_mask
& (1 << next_hi_reg
))
14041 pushable_regs
&= ~((1 << regno
) - 1);
14047 /* If we had to find a work register and we have not yet
14048 saved the LR then add it to the list of regs to push. */
14049 if (l_mask
== (1 << LR_REGNUM
))
14051 thumb_pushpop (f
, pushable_regs
| (1 << LR_REGNUM
),
14053 real_regs_mask
| (1 << LR_REGNUM
));
14057 thumb_pushpop (f
, pushable_regs
, 1, &cfa_offset
, real_regs_mask
);
14062 /* Handle the case of a double word load into a low register from
14063 a computed memory address. The computed address may involve a
14064 register which is overwritten by the load. */
14066 thumb_load_double_from_address (rtx
*operands
)
14074 gcc_assert (GET_CODE (operands
[0]) == REG
);
14075 gcc_assert (GET_CODE (operands
[1]) == MEM
);
14077 /* Get the memory address. */
14078 addr
= XEXP (operands
[1], 0);
14080 /* Work out how the memory address is computed. */
14081 switch (GET_CODE (addr
))
14084 operands
[2] = adjust_address (operands
[1], SImode
, 4);
14086 if (REGNO (operands
[0]) == REGNO (addr
))
14088 output_asm_insn ("ldr\t%H0, %2", operands
);
14089 output_asm_insn ("ldr\t%0, %1", operands
);
14093 output_asm_insn ("ldr\t%0, %1", operands
);
14094 output_asm_insn ("ldr\t%H0, %2", operands
);
14099 /* Compute <address> + 4 for the high order load. */
14100 operands
[2] = adjust_address (operands
[1], SImode
, 4);
14102 output_asm_insn ("ldr\t%0, %1", operands
);
14103 output_asm_insn ("ldr\t%H0, %2", operands
);
14107 arg1
= XEXP (addr
, 0);
14108 arg2
= XEXP (addr
, 1);
14110 if (CONSTANT_P (arg1
))
14111 base
= arg2
, offset
= arg1
;
14113 base
= arg1
, offset
= arg2
;
14115 gcc_assert (GET_CODE (base
) == REG
);
14117 /* Catch the case of <address> = <reg> + <reg> */
14118 if (GET_CODE (offset
) == REG
)
14120 int reg_offset
= REGNO (offset
);
14121 int reg_base
= REGNO (base
);
14122 int reg_dest
= REGNO (operands
[0]);
14124 /* Add the base and offset registers together into the
14125 higher destination register. */
14126 asm_fprintf (asm_out_file
, "\tadd\t%r, %r, %r",
14127 reg_dest
+ 1, reg_base
, reg_offset
);
14129 /* Load the lower destination register from the address in
14130 the higher destination register. */
14131 asm_fprintf (asm_out_file
, "\tldr\t%r, [%r, #0]",
14132 reg_dest
, reg_dest
+ 1);
14134 /* Load the higher destination register from its own address
14136 asm_fprintf (asm_out_file
, "\tldr\t%r, [%r, #4]",
14137 reg_dest
+ 1, reg_dest
+ 1);
14141 /* Compute <address> + 4 for the high order load. */
14142 operands
[2] = adjust_address (operands
[1], SImode
, 4);
14144 /* If the computed address is held in the low order register
14145 then load the high order register first, otherwise always
14146 load the low order register first. */
14147 if (REGNO (operands
[0]) == REGNO (base
))
14149 output_asm_insn ("ldr\t%H0, %2", operands
);
14150 output_asm_insn ("ldr\t%0, %1", operands
);
14154 output_asm_insn ("ldr\t%0, %1", operands
);
14155 output_asm_insn ("ldr\t%H0, %2", operands
);
      /* With no registers to worry about we can just load the value
	 directly.  */
14163 operands
[2] = adjust_address (operands
[1], SImode
, 4);
14165 output_asm_insn ("ldr\t%H0, %2", operands
);
14166 output_asm_insn ("ldr\t%0, %1", operands
);
14170 gcc_unreachable ();
14177 thumb_output_move_mem_multiple (int n
, rtx
*operands
)
14184 if (REGNO (operands
[4]) > REGNO (operands
[5]))
14187 operands
[4] = operands
[5];
14190 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands
);
14191 output_asm_insn ("stmia\t%0!, {%4, %5}", operands
);
14195 if (REGNO (operands
[4]) > REGNO (operands
[5]))
14198 operands
[4] = operands
[5];
14201 if (REGNO (operands
[5]) > REGNO (operands
[6]))
14204 operands
[5] = operands
[6];
14207 if (REGNO (operands
[4]) > REGNO (operands
[5]))
14210 operands
[4] = operands
[5];
14214 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands
);
14215 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands
);
14219 gcc_unreachable ();
14225 /* Output a call-via instruction for thumb state. */
14227 thumb_call_via_reg (rtx reg
)
14229 int regno
= REGNO (reg
);
14232 gcc_assert (regno
< LR_REGNUM
);
14234 /* If we are in the normal text section we can use a single instance
14235 per compilation unit. If we are doing function sections, then we need
14236 an entry per section, since we can't rely on reachability. */
14237 if (in_text_section ())
14239 thumb_call_reg_needed
= 1;
14241 if (thumb_call_via_label
[regno
] == NULL
)
14242 thumb_call_via_label
[regno
] = gen_label_rtx ();
14243 labelp
= thumb_call_via_label
+ regno
;
14247 if (cfun
->machine
->call_via
[regno
] == NULL
)
14248 cfun
->machine
->call_via
[regno
] = gen_label_rtx ();
14249 labelp
= cfun
->machine
->call_via
+ regno
;
14252 output_asm_insn ("bl\t%a0", labelp
);
14256 /* Routines for generating rtl. */
14258 thumb_expand_movmemqi (rtx
*operands
)
14260 rtx out
= copy_to_mode_reg (SImode
, XEXP (operands
[0], 0));
14261 rtx in
= copy_to_mode_reg (SImode
, XEXP (operands
[1], 0));
14262 HOST_WIDE_INT len
= INTVAL (operands
[2]);
14263 HOST_WIDE_INT offset
= 0;
14267 emit_insn (gen_movmem12b (out
, in
, out
, in
));
14273 emit_insn (gen_movmem8b (out
, in
, out
, in
));
14279 rtx reg
= gen_reg_rtx (SImode
);
14280 emit_insn (gen_movsi (reg
, gen_rtx_MEM (SImode
, in
)));
14281 emit_insn (gen_movsi (gen_rtx_MEM (SImode
, out
), reg
));
14288 rtx reg
= gen_reg_rtx (HImode
);
14289 emit_insn (gen_movhi (reg
, gen_rtx_MEM (HImode
,
14290 plus_constant (in
, offset
))));
14291 emit_insn (gen_movhi (gen_rtx_MEM (HImode
, plus_constant (out
, offset
)),
14299 rtx reg
= gen_reg_rtx (QImode
);
14300 emit_insn (gen_movqi (reg
, gen_rtx_MEM (QImode
,
14301 plus_constant (in
, offset
))));
14302 emit_insn (gen_movqi (gen_rtx_MEM (QImode
, plus_constant (out
, offset
)),
void
thumb_reload_out_hi (rtx *operands)
{
  emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
}

/* Handle reading a half-word from memory during reload.  */
void
thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}
14320 /* Return the length of a function name prefix
14321 that starts with the character 'c'. */
14323 arm_get_strip_length (int c
)
14327 ARM_NAME_ENCODING_LENGTHS
14332 /* Return a pointer to a function's name with any
14333 and all prefix encodings stripped from it. */
14335 arm_strip_name_encoding (const char *name
)
14339 while ((skip
= arm_get_strip_length (* name
)))
14345 /* If there is a '*' anywhere in the name's prefix, then
14346 emit the stripped name verbatim, otherwise prepend an
14347 underscore if leading underscores are being used. */
14349 arm_asm_output_labelref (FILE *stream
, const char *name
)
14354 while ((skip
= arm_get_strip_length (* name
)))
14356 verbatim
|= (*name
== '*');
14361 fputs (name
, stream
);
14363 asm_fprintf (stream
, "%U%s", name
);
14367 arm_file_end (void)
14371 if (! thumb_call_reg_needed
)
14375 asm_fprintf (asm_out_file
, "\t.code 16\n");
14376 ASM_OUTPUT_ALIGN (asm_out_file
, 1);
14378 for (regno
= 0; regno
< LR_REGNUM
; regno
++)
14380 rtx label
= thumb_call_via_label
[regno
];
14384 targetm
.asm_out
.internal_label (asm_out_file
, "L",
14385 CODE_LABEL_NUMBER (label
));
14386 asm_fprintf (asm_out_file
, "\tbx\t%r\n", regno
);
14393 #ifdef AOF_ASSEMBLER
14394 /* Special functions only needed when producing AOF syntax assembler. */
14398 struct pic_chain
* next
;
14399 const char * symname
;
14402 static struct pic_chain
* aof_pic_chain
= NULL
;
14405 aof_pic_entry (rtx x
)
14407 struct pic_chain
** chainp
;
14410 if (aof_pic_label
== NULL_RTX
)
14412 aof_pic_label
= gen_rtx_SYMBOL_REF (Pmode
, "x$adcons");
14415 for (offset
= 0, chainp
= &aof_pic_chain
; *chainp
;
14416 offset
+= 4, chainp
= &(*chainp
)->next
)
14417 if ((*chainp
)->symname
== XSTR (x
, 0))
14418 return plus_constant (aof_pic_label
, offset
);
14420 *chainp
= (struct pic_chain
*) xmalloc (sizeof (struct pic_chain
));
14421 (*chainp
)->next
= NULL
;
14422 (*chainp
)->symname
= XSTR (x
, 0);
14423 return plus_constant (aof_pic_label
, offset
);
14427 aof_dump_pic_table (FILE *f
)
14429 struct pic_chain
* chain
;
14431 if (aof_pic_chain
== NULL
)
14434 asm_fprintf (f
, "\tAREA |%r$$adcons|, BASED %r\n",
14435 PIC_OFFSET_TABLE_REGNUM
,
14436 PIC_OFFSET_TABLE_REGNUM
);
14437 fputs ("|x$adcons|\n", f
);
14439 for (chain
= aof_pic_chain
; chain
; chain
= chain
->next
)
14441 fputs ("\tDCD\t", f
);
14442 assemble_name (f
, chain
->symname
);
14447 int arm_text_section_count
= 1;
14450 aof_text_section (void )
14452 static char buf
[100];
14453 sprintf (buf
, "\tAREA |C$$code%d|, CODE, READONLY",
14454 arm_text_section_count
++);
14456 strcat (buf
, ", PIC, REENTRANT");
14460 static int arm_data_section_count
= 1;
14463 aof_data_section (void)
14465 static char buf
[100];
14466 sprintf (buf
, "\tAREA |C$$data%d|, DATA", arm_data_section_count
++);
14470 /* The AOF assembler is religiously strict about declarations of
14471 imported and exported symbols, so that it is impossible to declare
14472 a function as imported near the beginning of the file, and then to
14473 export it later on. It is, however, possible to delay the decision
14474 until all the functions in the file have been compiled. To get
14475 around this, we maintain a list of the imports and exports, and
14476 delete from it any that are subsequently defined. At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */
14482 struct import
* next
;
14486 static struct import
* imports_list
= NULL
;
14489 aof_add_import (const char *name
)
14491 struct import
* new;
14493 for (new = imports_list
; new; new = new->next
)
14494 if (new->name
== name
)
14497 new = (struct import
*) xmalloc (sizeof (struct import
));
14498 new->next
= imports_list
;
14499 imports_list
= new;
14504 aof_delete_import (const char *name
)
14506 struct import
** old
;
14508 for (old
= &imports_list
; *old
; old
= & (*old
)->next
)
14510 if ((*old
)->name
== name
)
14512 *old
= (*old
)->next
;
14518 int arm_main_function
= 0;
14521 aof_dump_imports (FILE *f
)
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automatically.  */
14526 if (arm_main_function
)
14529 fputs ("\tIMPORT __main\n", f
);
14530 fputs ("\tDCD __main\n", f
);
14533 /* Now dump the remaining imports. */
14534 while (imports_list
)
14536 fprintf (f
, "\tIMPORT\t");
14537 assemble_name (f
, imports_list
->name
);
14539 imports_list
= imports_list
->next
;
14544 aof_globalize_label (FILE *stream
, const char *name
)
14546 default_globalize_label (stream
, name
);
14547 if (! strcmp (name
, "main"))
14548 arm_main_function
= 1;
14552 aof_file_start (void)
14554 fputs ("__r0\tRN\t0\n", asm_out_file
);
14555 fputs ("__a1\tRN\t0\n", asm_out_file
);
14556 fputs ("__a2\tRN\t1\n", asm_out_file
);
14557 fputs ("__a3\tRN\t2\n", asm_out_file
);
14558 fputs ("__a4\tRN\t3\n", asm_out_file
);
14559 fputs ("__v1\tRN\t4\n", asm_out_file
);
14560 fputs ("__v2\tRN\t5\n", asm_out_file
);
14561 fputs ("__v3\tRN\t6\n", asm_out_file
);
14562 fputs ("__v4\tRN\t7\n", asm_out_file
);
14563 fputs ("__v5\tRN\t8\n", asm_out_file
);
14564 fputs ("__v6\tRN\t9\n", asm_out_file
);
14565 fputs ("__sl\tRN\t10\n", asm_out_file
);
14566 fputs ("__fp\tRN\t11\n", asm_out_file
);
14567 fputs ("__ip\tRN\t12\n", asm_out_file
);
14568 fputs ("__sp\tRN\t13\n", asm_out_file
);
14569 fputs ("__lr\tRN\t14\n", asm_out_file
);
14570 fputs ("__pc\tRN\t15\n", asm_out_file
);
14571 fputs ("__f0\tFN\t0\n", asm_out_file
);
14572 fputs ("__f1\tFN\t1\n", asm_out_file
);
14573 fputs ("__f2\tFN\t2\n", asm_out_file
);
14574 fputs ("__f3\tFN\t3\n", asm_out_file
);
14575 fputs ("__f4\tFN\t4\n", asm_out_file
);
14576 fputs ("__f5\tFN\t5\n", asm_out_file
);
14577 fputs ("__f6\tFN\t6\n", asm_out_file
);
14578 fputs ("__f7\tFN\t7\n", asm_out_file
);
14583 aof_file_end (void)
14586 aof_dump_pic_table (asm_out_file
);
14588 aof_dump_imports (asm_out_file
);
14589 fputs ("\tEND\n", asm_out_file
);
14591 #endif /* AOF_ASSEMBLER */
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   anyway.  */
14601 arm_encode_section_info (tree decl
, rtx rtl
, int first
)
14603 /* This doesn't work with AOF syntax, since the string table may be in
14604 a different AREA. */
14605 #ifndef AOF_ASSEMBLER
14606 if (optimize
> 0 && TREE_CONSTANT (decl
))
14607 SYMBOL_REF_FLAG (XEXP (rtl
, 0)) = 1;
  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
14613 if (first
&& DECL_P (decl
))
14615 if (TREE_CODE (decl
) == FUNCTION_DECL
&& DECL_WEAK (decl
))
14616 arm_encode_call_attribute (decl
, LONG_CALL_FLAG_CHAR
);
14617 else if (! TREE_PUBLIC (decl
))
14618 arm_encode_call_attribute (decl
, SHORT_CALL_FLAG_CHAR
);
14621 default_encode_section_info (decl
, rtl
, first
);
14623 #endif /* !ARM_PE */
14626 arm_internal_label (FILE *stream
, const char *prefix
, unsigned long labelno
)
14628 if (arm_ccfsm_state
== 3 && (unsigned) arm_target_label
== labelno
14629 && !strcmp (prefix
, "L"))
14631 arm_ccfsm_state
= 0;
14632 arm_target_insn
= NULL
;
14634 default_internal_label (stream
, prefix
, labelno
);
14637 /* Output code to add DELTA to the first argument, and then jump
14638 to FUNCTION. Used for C++ multiple inheritance. */
14640 arm_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
14641 HOST_WIDE_INT delta
,
14642 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED
,
14645 static int thunk_label
= 0;
14648 int mi_delta
= delta
;
14649 const char *const mi_op
= mi_delta
< 0 ? "sub" : "add";
14651 int this_regno
= (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
)
14654 mi_delta
= - mi_delta
;
14657 int labelno
= thunk_label
++;
14658 ASM_GENERATE_INTERNAL_LABEL (label
, "LTHUMBFUNC", labelno
);
14659 fputs ("\tldr\tr12, ", file
);
14660 assemble_name (file
, label
);
14661 fputc ('\n', file
);
14664 /* If we are generating PIC, the ldr instruction below loads
14665 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
14666 the address of the add + 8, so we have:
	     r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
		 = target + 1.
14671 Note that we have "+ 1" because some versions of GNU ld
14672 don't set the low bit of the result for R_ARM_REL32
14673 relocations against thumb function symbols. */
14674 ASM_GENERATE_INTERNAL_LABEL (labelpc
, "LTHUNKPC", labelno
);
14675 assemble_name (file
, labelpc
);
14676 fputs (":\n", file
);
14677 fputs ("\tadd\tr12, pc, r12\n", file
);
14680 while (mi_delta
!= 0)
14682 if ((mi_delta
& (3 << shift
)) == 0)
14686 asm_fprintf (file
, "\t%s\t%r, %r, #%d\n",
14687 mi_op
, this_regno
, this_regno
,
14688 mi_delta
& (0xff << shift
));
14689 mi_delta
&= ~(0xff << shift
);
14695 fprintf (file
, "\tbx\tr12\n");
14696 ASM_OUTPUT_ALIGN (file
, 2);
14697 assemble_name (file
, label
);
14698 fputs (":\n", file
);
14701 /* Output ".word .LTHUNKn-7-.LTHUNKPCn". */
14702 rtx tem
= XEXP (DECL_RTL (function
), 0);
14703 tem
= gen_rtx_PLUS (GET_MODE (tem
), tem
, GEN_INT (-7));
14704 tem
= gen_rtx_MINUS (GET_MODE (tem
),
14706 gen_rtx_SYMBOL_REF (Pmode
,
14707 ggc_strdup (labelpc
)));
14708 assemble_integer (tem
, 4, BITS_PER_WORD
, 1);
14711 /* Output ".word .LTHUNKn". */
14712 assemble_integer (XEXP (DECL_RTL (function
), 0), 4, BITS_PER_WORD
, 1);
14716 fputs ("\tb\t", file
);
14717 assemble_name (file
, XSTR (XEXP (DECL_RTL (function
), 0), 0));
14718 if (NEED_PLT_RELOC
)
14719 fputs ("(PLT)", file
);
14720 fputc ('\n', file
);
14725 arm_emit_vector_const (FILE *file
, rtx x
)
14728 const char * pattern
;
14730 gcc_assert (GET_CODE (x
) == CONST_VECTOR
);
14732 switch (GET_MODE (x
))
14734 case V2SImode
: pattern
= "%08x"; break;
14735 case V4HImode
: pattern
= "%04x"; break;
14736 case V8QImode
: pattern
= "%02x"; break;
14737 default: gcc_unreachable ();
14740 fprintf (file
, "0x");
14741 for (i
= CONST_VECTOR_NUNITS (x
); i
--;)
14745 element
= CONST_VECTOR_ELT (x
, i
);
14746 fprintf (file
, pattern
, INTVAL (element
));
14753 arm_output_load_gr (rtx
*operands
)
14760 if (GET_CODE (operands
[1]) != MEM
14761 || GET_CODE (sum
= XEXP (operands
[1], 0)) != PLUS
14762 || GET_CODE (reg
= XEXP (sum
, 0)) != REG
14763 || GET_CODE (offset
= XEXP (sum
, 1)) != CONST_INT
14764 || ((INTVAL (offset
) < 1024) && (INTVAL (offset
) > -1024)))
14765 return "wldrw%?\t%0, %1";
14767 /* Fix up an out-of-range load of a GR register. */
14768 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg
);
14769 wcgr
= operands
[0];
14771 output_asm_insn ("ldr%?\t%0, %1", operands
);
14773 operands
[0] = wcgr
;
14775 output_asm_insn ("tmcr%?\t%0, %1", operands
);
14776 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg
);
14782 arm_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED
,
14783 int incoming ATTRIBUTE_UNUSED
)
14786 /* FIXME: The ARM backend has special code to handle structure
14787 returns, and will reserve its own hidden first argument. So
14788 if this macro is enabled a *second* hidden argument will be
14789 reserved, which will break binary compatibility with old
     toolchains and also thunk handling.  One day this should be
     fixed.  */
14794 /* Register in which address to store a structure value
14795 is passed to a function. */
14796 return gen_rtx_REG (Pmode
, ARG_REGISTER (1));
14800 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
14802 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
14803 named arg and all anonymous args onto the stack.
14804 XXX I know the prologue shouldn't be pushing registers, but it is faster
14808 arm_setup_incoming_varargs (CUMULATIVE_ARGS
*cum
,
14809 enum machine_mode mode ATTRIBUTE_UNUSED
,
14810 tree type ATTRIBUTE_UNUSED
,
14812 int second_time ATTRIBUTE_UNUSED
)
14814 cfun
->machine
->uses_anonymous_args
= 1;
14815 if (cum
->nregs
< NUM_ARG_REGS
)
14816 *pretend_size
= (NUM_ARG_REGS
- cum
->nregs
) * UNITS_PER_WORD
;
14819 /* Return nonzero if the CONSUMER instruction (a store) does not need
14820 PRODUCER's value to calculate the address. */
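/* Editor's note (illustrative, not from the original source): if PRODUCER
   sets r3 and CONSUMER is "str r3, [r2, #4]", the store uses r3 only as the
   data being stored, not as part of the address, so there is no early
   dependency on r3 and the check below reports none.  */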
14823 arm_no_early_store_addr_dep (rtx producer
, rtx consumer
)
14825 rtx value
= PATTERN (producer
);
14826 rtx addr
= PATTERN (consumer
);
14828 if (GET_CODE (value
) == COND_EXEC
)
14829 value
= COND_EXEC_CODE (value
);
14830 if (GET_CODE (value
) == PARALLEL
)
14831 value
= XVECEXP (value
, 0, 0);
14832 value
= XEXP (value
, 0);
14833 if (GET_CODE (addr
) == COND_EXEC
)
14834 addr
= COND_EXEC_CODE (addr
);
14835 if (GET_CODE (addr
) == PARALLEL
)
14836 addr
= XVECEXP (addr
, 0, 0);
14837 addr
= XEXP (addr
, 0);
14839 return !reg_overlap_mentioned_p (value
, addr
);
14842 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
14843 have an early register shift value or amount dependency on the
14844 result of PRODUCER. */
14847 arm_no_early_alu_shift_dep (rtx producer
, rtx consumer
)
14849 rtx value
= PATTERN (producer
);
14850 rtx op
= PATTERN (consumer
);
14853 if (GET_CODE (value
) == COND_EXEC
)
14854 value
= COND_EXEC_CODE (value
);
14855 if (GET_CODE (value
) == PARALLEL
)
14856 value
= XVECEXP (value
, 0, 0);
14857 value
= XEXP (value
, 0);
14858 if (GET_CODE (op
) == COND_EXEC
)
14859 op
= COND_EXEC_CODE (op
);
14860 if (GET_CODE (op
) == PARALLEL
)
14861 op
= XVECEXP (op
, 0, 0);
14864 early_op
= XEXP (op
, 0);
14865 /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
14868 if (GET_CODE (early_op
) == REG
)
14871 return !reg_overlap_mentioned_p (value
, early_op
);
14874 /* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */
14879 arm_no_early_alu_shift_value_dep (rtx producer
, rtx consumer
)
14881 rtx value
= PATTERN (producer
);
14882 rtx op
= PATTERN (consumer
);
14885 if (GET_CODE (value
) == COND_EXEC
)
14886 value
= COND_EXEC_CODE (value
);
14887 if (GET_CODE (value
) == PARALLEL
)
14888 value
= XVECEXP (value
, 0, 0);
14889 value
= XEXP (value
, 0);
14890 if (GET_CODE (op
) == COND_EXEC
)
14891 op
= COND_EXEC_CODE (op
);
14892 if (GET_CODE (op
) == PARALLEL
)
14893 op
= XVECEXP (op
, 0, 0);
14896 early_op
= XEXP (op
, 0);
14898 /* This is either an actual independent shift, or a shift applied to
14899 the first operand of another operation. We want the value being
14900 shifted, in either case. */
14901 if (GET_CODE (early_op
) != REG
)
14902 early_op
= XEXP (early_op
, 0);
14904 return !reg_overlap_mentioned_p (value
, early_op
);
14907 /* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */
14912 arm_no_early_mul_dep (rtx producer
, rtx consumer
)
14914 rtx value
= PATTERN (producer
);
14915 rtx op
= PATTERN (consumer
);
14917 if (GET_CODE (value
) == COND_EXEC
)
14918 value
= COND_EXEC_CODE (value
);
14919 if (GET_CODE (value
) == PARALLEL
)
14920 value
= XVECEXP (value
, 0, 0);
14921 value
= XEXP (value
, 0);
14922 if (GET_CODE (op
) == COND_EXEC
)
14923 op
= COND_EXEC_CODE (op
);
14924 if (GET_CODE (op
) == PARALLEL
)
14925 op
= XVECEXP (op
, 0, 0);
14928 return (GET_CODE (op
) == PLUS
14929 && !reg_overlap_mentioned_p (value
, XEXP (op
, 0)));
14933 /* We can't rely on the caller doing the proper promotion when
14934 using APCS or ATPCS. */
14937 arm_promote_prototypes (tree t ATTRIBUTE_UNUSED
)
14939 return !TARGET_AAPCS_BASED
;
14943 /* AAPCS based ABIs use short enums by default. */
14946 arm_default_short_enums (void)
14948 return TARGET_AAPCS_BASED
&& arm_abi
!= ARM_ABI_AAPCS_LINUX
;
14952 /* AAPCS requires that anonymous bitfields affect structure alignment. */
14955 arm_align_anon_bitfield (void)
14957 return TARGET_AAPCS_BASED
;
14961 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
14964 arm_cxx_guard_type (void)
14966 return TARGET_AAPCS_BASED
? integer_type_node
: long_long_integer_type_node
;
14970 /* The EABI says test the least significant bit of a guard variable. */
14973 arm_cxx_guard_mask_bit (void)
14975 return TARGET_AAPCS_BASED
;
14979 /* The EABI specifies that all array cookies are 8 bytes long. */
14982 arm_get_cookie_size (tree type
)
14986 if (!TARGET_AAPCS_BASED
)
14987 return default_cxx_get_cookie_size (type
);
14989 size
= build_int_cst (sizetype
, 8);
14994 /* The EABI says that array cookies should also contain the element size. */
14997 arm_cookie_has_size (void)
14999 return TARGET_AAPCS_BASED
;
15003 /* The EABI says constructors and destructors should return a pointer to
15004 the object constructed/destroyed. */
15007 arm_cxx_cdtor_returns_this (void)
15009 return TARGET_AAPCS_BASED
;
/* The EABI says that an inline function may never be the key
   method.  */
15016 arm_cxx_key_method_may_be_inline (void)
15018 return !TARGET_AAPCS_BASED
;
15022 arm_cxx_determine_class_data_visibility (tree decl
)
15024 if (!TARGET_AAPCS_BASED
)
15027 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
15028 is exported. However, on systems without dynamic vague linkage,
15029 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
15030 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P
&& DECL_COMDAT (decl
))
15031 DECL_VISIBILITY (decl
) = VISIBILITY_HIDDEN
;
15033 DECL_VISIBILITY (decl
) = VISIBILITY_DEFAULT
;
15034 DECL_VISIBILITY_SPECIFIED (decl
) = 1;
15038 arm_cxx_class_data_always_comdat (void)
15040 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
15041 vague linkage if the class has no key function. */
15042 return !TARGET_AAPCS_BASED
;
15046 /* The EABI says __aeabi_atexit should be used to register static
15050 arm_cxx_use_aeabi_atexit (void)
15052 return TARGET_AAPCS_BASED
;
15057 arm_set_return_address (rtx source
, rtx scratch
)
15059 arm_stack_offsets
*offsets
;
15060 HOST_WIDE_INT delta
;
15062 unsigned long saved_regs
;
15064 saved_regs
= arm_compute_save_reg_mask ();
15066 if ((saved_regs
& (1 << LR_REGNUM
)) == 0)
15067 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNUM
), source
);
15070 if (frame_pointer_needed
)
15071 addr
= plus_constant(hard_frame_pointer_rtx
, -4);
15074 /* LR will be the first saved register. */
15075 offsets
= arm_get_frame_offsets ();
15076 delta
= offsets
->outgoing_args
- (offsets
->frame
+ 4);
15081 emit_insn (gen_addsi3 (scratch
, stack_pointer_rtx
,
15082 GEN_INT (delta
& ~4095)));
15087 addr
= stack_pointer_rtx
;
15089 addr
= plus_constant (addr
, delta
);
15091 emit_move_insn (gen_frame_mem (Pmode
, addr
), source
);
15097 thumb_set_return_address (rtx source
, rtx scratch
)
15099 arm_stack_offsets
*offsets
;
15100 HOST_WIDE_INT delta
;
15103 unsigned long mask
;
15105 emit_insn (gen_rtx_USE (VOIDmode
, source
));
15107 mask
= thumb_compute_save_reg_mask ();
15108 if (mask
& (1 << LR_REGNUM
))
15110 offsets
= arm_get_frame_offsets ();
15112 /* Find the saved regs. */
15113 if (frame_pointer_needed
)
15115 delta
= offsets
->soft_frame
- offsets
->saved_args
;
15116 reg
= THUMB_HARD_FRAME_POINTER_REGNUM
;
15120 delta
= offsets
->outgoing_args
- offsets
->saved_args
;
15123 /* Allow for the stack frame. */
15124 if (TARGET_BACKTRACE
)
15126 /* The link register is always the first saved register. */
15129 /* Construct the address. */
15130 addr
= gen_rtx_REG (SImode
, reg
);
15131 if ((reg
!= SP_REGNUM
&& delta
>= 128)
15134 emit_insn (gen_movsi (scratch
, GEN_INT (delta
)));
15135 emit_insn (gen_addsi3 (scratch
, scratch
, stack_pointer_rtx
));
15139 addr
= plus_constant (addr
, delta
);
15141 emit_move_insn (gen_frame_mem (Pmode
, addr
), source
);
15144 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNUM
), source
);
/* Implements target hook vector_mode_supported_p.  */
bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}
/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */

static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}
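/* Editor's note (illustrative, not from the original source): a mask of 255
   tells the middle end that an SImode shift by (n & 255) behaves exactly
   like a shift by n, so the explicit masking can be optimized away; the
   zero returned for other modes forbids that assumption.  */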
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
arm_dbx_register_number (unsigned int regno)
{
  if (regno < 16)
    return regno;

  /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
     compatibility.  The EABI defines them as registers 96-103.  */
  if (IS_FPA_REGNUM (regno))
    return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;

  if (IS_VFP_REGNUM (regno))
    return 64 + regno - FIRST_VFP_REGNUM;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return 104 + regno - FIRST_IWMMXT_GR_REGNUM;

  if (IS_IWMMXT_REGNUM (regno))
    return 112 + regno - FIRST_IWMMXT_REGNUM;

  gcc_unreachable ();
}
#ifdef TARGET_UNWIND_INFO
/* Emit unwind directives for a store-multiple instruction.  This should
   only ever be generated by the function prologue code, so we expect it
   to have a particular form.  */

static void
arm_unwind_emit_stm (FILE * asm_out_file, rtx p)
{
  int i;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT nregs;
  int reg_size;
  unsigned reg;
  rtx e;

  /* First insn will adjust the stack pointer.  */
  e = XVECEXP (p, 0, 0);
  if (GET_CODE (e) != SET
      || GET_CODE (XEXP (e, 0)) != REG
      || REGNO (XEXP (e, 0)) != SP_REGNUM
      || GET_CODE (XEXP (e, 1)) != PLUS)
    abort ();

  offset = -INTVAL (XEXP (XEXP (e, 1), 1));
  nregs = XVECLEN (p, 0) - 1;

  reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
  if (reg < 16)
    {
      /* The function prologue may also push pc, but not annotate it as it is
         never restored.  We turn this into a stack pointer adjustment.  */
      if (nregs * 4 == offset - 4)
        {
          fprintf (asm_out_file, "\t.pad #4\n");
          offset -= 4;
        }
      reg_size = 4;
    }
  else if (IS_VFP_REGNUM (reg))
    {
      /* FPA register saves use an additional word.  */
      offset -= 4;
      reg_size = 8;
    }
  else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
    {
      /* FPA registers are done differently.  */
      asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
      return;
    }
  else
    /* Unknown register type.  */
    abort ();

  /* If the stack increment doesn't match the size of the saved registers,
     something has gone horribly wrong.  */
  if (offset != nregs * reg_size)
    abort ();

  fprintf (asm_out_file, "\t.save {");

  offset = 0;
  /* The remaining insns will describe the stores.  */
  for (i = 1; i <= nregs; i++)
    {
      /* Expect (set (mem <addr>) (reg)).
         Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)).  */
      e = XVECEXP (p, 0, i);
      if (GET_CODE (e) != SET
          || GET_CODE (XEXP (e, 0)) != MEM
          || GET_CODE (XEXP (e, 1)) != REG)
        abort ();

      reg = REGNO (XEXP (e, 1));

      if (i != 1)
        fprintf (asm_out_file, ", ");
      /* We can't use %r for vfp because we need to use the
         double precision register names.  */
      if (IS_VFP_REGNUM (reg))
        asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
      else
        asm_fprintf (asm_out_file, "%r", reg);

#ifdef ENABLE_CHECKING
      /* Check that the addresses are consecutive.  */
      e = XEXP (XEXP (e, 0), 0);
      if (GET_CODE (e) == PLUS)
        {
          offset += reg_size;
          if (GET_CODE (XEXP (e, 0)) != REG
              || REGNO (XEXP (e, 0)) != SP_REGNUM
              || GET_CODE (XEXP (e, 1)) != CONST_INT
              || offset != INTVAL (XEXP (e, 1)))
            abort ();
        }
      else if (i != 1
               || GET_CODE (e) != REG
               || REGNO (e) != SP_REGNUM)
        abort ();
#endif
    }

  fprintf (asm_out_file, "}\n");
}
/* Emit unwind directives for a SET.  */

static void
arm_unwind_emit_set (FILE * asm_out_file, rtx p)
{
  rtx e0;
  rtx e1;

  e0 = XEXP (p, 0);
  e1 = XEXP (p, 1);
  switch (GET_CODE (e0))
    {
    case MEM:
      /* Pushing a single register.  */
      if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
          || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
          || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
        abort ();

      asm_fprintf (asm_out_file, "\t.save ");
      if (IS_VFP_REGNUM (REGNO (e1)))
        asm_fprintf (asm_out_file, "{d%d}\n",
                     (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
      else
        asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
      break;

    case REG:
      if (REGNO (e0) == SP_REGNUM)
        {
          /* A stack increment.  */
          if (GET_CODE (e1) != PLUS
              || GET_CODE (XEXP (e1, 0)) != REG
              || REGNO (XEXP (e1, 0)) != SP_REGNUM
              || GET_CODE (XEXP (e1, 1)) != CONST_INT)
            abort ();

          asm_fprintf (asm_out_file, "\t.pad #%wd\n",
                       -INTVAL (XEXP (e1, 1)));
        }
      else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
        {
          HOST_WIDE_INT offset;
          unsigned reg;

          if (GET_CODE (e1) == PLUS)
            {
              if (GET_CODE (XEXP (e1, 0)) != REG
                  || GET_CODE (XEXP (e1, 1)) != CONST_INT)
                abort ();
              reg = REGNO (XEXP (e1, 0));
              offset = INTVAL (XEXP (e1, 1));
              asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
                           HARD_FRAME_POINTER_REGNUM, reg,
                           INTVAL (XEXP (e1, 1)));
            }
          else if (GET_CODE (e1) == REG)
            {
              reg = REGNO (e1);
              asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
                           HARD_FRAME_POINTER_REGNUM, reg);
            }
          else
            abort ();
        }
      else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
        {
          /* Move from sp to reg.  */
          asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
        }
      else
        abort ();
      break;

    default:
      abort ();
    }
}
/* Emit unwind directives for the given insn.  */

static void
arm_unwind_emit (FILE * asm_out_file, rtx insn)
{
  rtx pat;

  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
    return;

  pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
  if (pat)
    pat = XEXP (pat, 0);
  else
    pat = PATTERN (insn);

  switch (GET_CODE (pat))
    {
    case SET:
      arm_unwind_emit_set (asm_out_file, pat);
      break;

    case SEQUENCE:
      /* Store multiple.  */
      arm_unwind_emit_stm (asm_out_file, pat);
      break;

    default:
      abort ();
    }
}
/* Output a reference from a function exception table to the type_info
   object X.  The EABI specifies that the symbol should be relocated by
   an R_ARM_TARGET2 relocation.  */

static bool
arm_output_ttype (rtx x)
{
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, x);
  /* Use special relocations for symbol references.  */
  if (GET_CODE (x) != CONST_INT)
    fputs ("(TARGET2)", asm_out_file);
  fputc ('\n', asm_out_file);

  return TRUE;
}
#endif /* TARGET_UNWIND_INFO */
/* Output unwind directives for the start/end of a function.  */

void
arm_output_fn_unwind (FILE * f, bool prologue)
{
  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (prologue)
    fputs ("\t.fnstart\n", f);
  else
    fputs ("\t.fnend\n", f);
}
/* Output the operand of a TLS UNSPEC, decorated with the relocation
   operator required for the given TLS model.  */

static bool
arm_emit_tls_decoration (FILE *fp, rtx x)
{
  enum tls_reloc reloc;
  rtx val;

  val = XVECEXP (x, 0, 0);
  reloc = INTVAL (XVECEXP (x, 0, 1));

  output_addr_const (fp, val);

  switch (reloc)
    {
    case TLS_GD32:
      fputs ("(tlsgd)", fp);
      break;
    case TLS_LDM32:
      fputs ("(tlsldm)", fp);
      break;
    case TLS_LDO32:
      fputs ("(tlsldo)", fp);
      break;
    case TLS_IE32:
      fputs ("(gottpoff)", fp);
      break;
    case TLS_LE32:
      fputs ("(tpoff)", fp);
      break;
    default:
      gcc_unreachable ();
    }

  switch (reloc)
    {
    case TLS_GD32:
    case TLS_LDM32:
    case TLS_IE32:
      fputs (" + (. - ", fp);
      output_addr_const (fp, XVECEXP (x, 0, 2));
      fputs (" - ", fp);
      output_addr_const (fp, XVECEXP (x, 0, 3));
      fputs (")", fp);
      break;

    default:
      break;
    }

  return TRUE;
}
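/* The second switch appends a PC-relative correction for the relocations
   that need one, so a general-dynamic reference to "x" is printed roughly
   as

        x(tlsgd) + (. - .LPIC0 - 8)

   (the label name and constant here are only illustrative; they come from
   the last two operands of the UNSPEC), while a local-exec reference is
   simply "x(tpoff)".  */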
/* Output an addressing operand that output_addr_const cannot handle by
   itself: TLS UNSPECs, PIC label UNSPECs and constant vectors.  */

bool
arm_output_addr_const_extra (FILE *fp, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
    return arm_emit_tls_decoration (fp, x);
  else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
    {
      char label[256];
      int labelno = INTVAL (XVECEXP (x, 0, 0));

      ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
      assemble_name_raw (fp, label);

      return TRUE;
    }
  else if (GET_CODE (x) == CONST_VECTOR)
    return arm_emit_vector_const (fp, x);

  return FALSE;
}
#include "gt-arm.h"