1 /* Subroutines for insn-output.cc for Motorola 68000 family.
2 Copyright (C) 1987-2025 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
23 #define INCLUDE_STRING
25 #include "coretypes.h"
29 #include "stringpool.h"
34 #include "fold-const.h"
36 #include "stor-layout.h"
39 #include "insn-config.h"
40 #include "conditions.h"
42 #include "insn-attr.h"
44 #include "diagnostic-core.h"
61 #include "cfgcleanup.h"
62 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
63 #include "sched-int.h"
64 #include "insn-codes.h"
71 /* This file should be included last. */
72 #include "target-def.h"
74 enum reg_class regno_reg_class
[] =
76 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
77 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
78 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
79 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
80 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
81 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
86 /* The minimum number of integer registers that we want to save with the
87 movem instruction. Using two movel instructions instead of a single
88 moveml is about 15% faster for the 68020 and 68030 at no expense in
90 #define MIN_MOVEM_REGS 3
92 /* The minimum number of floating point registers that we want to save
93 with the fmovem instruction. */
94 #define MIN_FMOVEM_REGS 1
96 /* Structure describing stack frame layout. */
99 /* Stack pointer to frame pointer offset. */
100 HOST_WIDE_INT offset
;
102 /* Offset of FPU registers. */
103 HOST_WIDE_INT foffset
;
105 /* Frame size in bytes (rounded up). */
108 /* Data and address register. */
110 unsigned int reg_mask
;
114 unsigned int fpu_mask
;
116 /* Offsets relative to ARG_POINTER. */
117 HOST_WIDE_INT frame_pointer_offset
;
118 HOST_WIDE_INT stack_pointer_offset
;
120 /* Function which the above information refers to. */
124 /* Current frame information calculated by m68k_compute_frame_layout(). */
125 static struct m68k_frame current_frame
;
127 /* Structure describing an m68k address.
129 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
130 with null fields evaluating to 0. Here:
132 - BASE satisfies m68k_legitimate_base_reg_p
133 - INDEX satisfies m68k_legitimate_index_reg_p
134 - OFFSET satisfies m68k_legitimate_constant_address_p
136 INDEX is either HImode or SImode. The other fields are SImode.
138 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
139 the address is (BASE)+. */
140 struct m68k_address
{
148 static int m68k_sched_adjust_cost (rtx_insn
*, int, rtx_insn
*, int,
150 static int m68k_sched_issue_rate (void);
151 static int m68k_sched_variable_issue (FILE *, int, rtx_insn
*, int);
152 static void m68k_sched_md_init_global (FILE *, int, int);
153 static void m68k_sched_md_finish_global (FILE *, int);
154 static void m68k_sched_md_init (FILE *, int, int);
155 static void m68k_sched_dfa_pre_advance_cycle (void);
156 static void m68k_sched_dfa_post_advance_cycle (void);
157 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
159 static bool m68k_can_eliminate (const int, const int);
160 static void m68k_conditional_register_usage (void);
161 static bool m68k_legitimate_address_p (machine_mode
, rtx
, bool,
162 code_helper
= ERROR_MARK
);
163 static void m68k_option_override (void);
164 static void m68k_override_options_after_change (void);
165 static rtx
find_addr_reg (rtx
);
166 static const char *singlemove_string (rtx
*);
167 static void m68k_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
168 HOST_WIDE_INT
, tree
);
169 static rtx
m68k_struct_value_rtx (tree
, int);
170 static tree
m68k_handle_fndecl_attribute (tree
*node
, tree name
,
171 tree args
, int flags
,
173 static void m68k_compute_frame_layout (void);
174 static bool m68k_save_reg (unsigned int regno
, bool interrupt_handler
);
175 static bool m68k_ok_for_sibcall_p (tree
, tree
);
176 static bool m68k_tls_symbol_p (rtx
);
177 static rtx
m68k_legitimize_address (rtx
, rtx
, machine_mode
);
178 static bool m68k_rtx_costs (rtx
, machine_mode
, int, int, int *, bool);
179 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
180 static bool m68k_return_in_memory (const_tree
, const_tree
);
182 static void m68k_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
183 static void m68k_trampoline_init (rtx
, tree
, rtx
);
184 static poly_int64
m68k_return_pops_args (tree
, tree
, poly_int64
);
185 static rtx
m68k_delegitimize_address (rtx
);
186 static void m68k_function_arg_advance (cumulative_args_t
,
187 const function_arg_info
&);
188 static rtx
m68k_function_arg (cumulative_args_t
, const function_arg_info
&);
189 static bool m68k_cannot_force_const_mem (machine_mode mode
, rtx x
);
190 static bool m68k_output_addr_const_extra (FILE *, rtx
);
191 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED
;
192 static enum flt_eval_method
193 m68k_excess_precision (enum excess_precision_type
);
194 static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode
);
195 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode
);
196 static bool m68k_modes_tieable_p (machine_mode
, machine_mode
);
197 static machine_mode
m68k_promote_function_mode (const_tree
, machine_mode
,
198 int *, const_tree
, int);
199 static void m68k_asm_final_postscan_insn (FILE *, rtx_insn
*insn
, rtx
[], int);
200 static HARD_REG_SET
m68k_zero_call_used_regs (HARD_REG_SET
);
201 static machine_mode
m68k_c_mode_for_floating_type (enum tree_index
);
202 static bool m68k_use_lra_p (void);
204 /* Initialize the GCC target structure. */
206 #if INT_OP_GROUP == INT_OP_DOT_WORD
207 #undef TARGET_ASM_ALIGNED_HI_OP
208 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
211 #if INT_OP_GROUP == INT_OP_NO_DOT
212 #undef TARGET_ASM_BYTE_OP
213 #define TARGET_ASM_BYTE_OP "\tbyte\t"
214 #undef TARGET_ASM_ALIGNED_HI_OP
215 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
216 #undef TARGET_ASM_ALIGNED_SI_OP
217 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
220 #if INT_OP_GROUP == INT_OP_DC
221 #undef TARGET_ASM_BYTE_OP
222 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
223 #undef TARGET_ASM_ALIGNED_HI_OP
224 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
225 #undef TARGET_ASM_ALIGNED_SI_OP
226 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
229 #undef TARGET_ASM_UNALIGNED_HI_OP
230 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
231 #undef TARGET_ASM_UNALIGNED_SI_OP
232 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
234 #undef TARGET_ASM_OUTPUT_MI_THUNK
235 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
236 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
237 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
239 #undef TARGET_ASM_FILE_START_APP_OFF
240 #define TARGET_ASM_FILE_START_APP_OFF true
242 #undef TARGET_LEGITIMIZE_ADDRESS
243 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
245 #undef TARGET_SCHED_ADJUST_COST
246 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
248 #undef TARGET_SCHED_ISSUE_RATE
249 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
251 #undef TARGET_SCHED_VARIABLE_ISSUE
252 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
254 #undef TARGET_SCHED_INIT_GLOBAL
255 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
257 #undef TARGET_SCHED_FINISH_GLOBAL
258 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
260 #undef TARGET_SCHED_INIT
261 #define TARGET_SCHED_INIT m68k_sched_md_init
263 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
264 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
266 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
267 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
269 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
270 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
271 m68k_sched_first_cycle_multipass_dfa_lookahead
273 #undef TARGET_OPTION_OVERRIDE
274 #define TARGET_OPTION_OVERRIDE m68k_option_override
276 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
277 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
279 #undef TARGET_RTX_COSTS
280 #define TARGET_RTX_COSTS m68k_rtx_costs
282 #undef TARGET_ATTRIBUTE_TABLE
283 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
285 #undef TARGET_PROMOTE_PROTOTYPES
286 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
288 #undef TARGET_STRUCT_VALUE_RTX
289 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
291 #undef TARGET_CANNOT_FORCE_CONST_MEM
292 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
294 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
295 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
297 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
298 #undef TARGET_RETURN_IN_MEMORY
299 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
303 #undef TARGET_HAVE_TLS
304 #define TARGET_HAVE_TLS (true)
306 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
307 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
311 #define TARGET_LRA_P m68k_use_lra_p
313 #undef TARGET_LEGITIMATE_ADDRESS_P
314 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
316 #undef TARGET_CAN_ELIMINATE
317 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
319 #undef TARGET_CONDITIONAL_REGISTER_USAGE
320 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
322 #undef TARGET_TRAMPOLINE_INIT
323 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
325 #undef TARGET_RETURN_POPS_ARGS
326 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
328 #undef TARGET_DELEGITIMIZE_ADDRESS
329 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
331 #undef TARGET_FUNCTION_ARG
332 #define TARGET_FUNCTION_ARG m68k_function_arg
334 #undef TARGET_FUNCTION_ARG_ADVANCE
335 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
337 #undef TARGET_LEGITIMATE_CONSTANT_P
338 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
340 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
341 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
343 #undef TARGET_C_EXCESS_PRECISION
344 #define TARGET_C_EXCESS_PRECISION m68k_excess_precision
346 /* The value stored by TAS. */
347 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
348 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
350 #undef TARGET_HARD_REGNO_NREGS
351 #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
352 #undef TARGET_HARD_REGNO_MODE_OK
353 #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
355 #undef TARGET_MODES_TIEABLE_P
356 #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
358 #undef TARGET_PROMOTE_FUNCTION_MODE
359 #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode
361 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
362 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
364 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
365 #define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn
367 #undef TARGET_ZERO_CALL_USED_REGS
368 #define TARGET_ZERO_CALL_USED_REGS m68k_zero_call_used_regs
370 #undef TARGET_C_MODE_FOR_FLOATING_TYPE
371 #define TARGET_C_MODE_FOR_FLOATING_TYPE m68k_c_mode_for_floating_type
373 TARGET_GNU_ATTRIBUTES (m68k_attribute_table
,
375 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
376 affects_type_identity, handler, exclude } */
377 { "interrupt", 0, 0, true, false, false, false,
378 m68k_handle_fndecl_attribute
, NULL
},
379 { "interrupt_handler", 0, 0, true, false, false, false,
380 m68k_handle_fndecl_attribute
, NULL
},
381 { "interrupt_thread", 0, 0, true, false, false, false,
382 m68k_handle_fndecl_attribute
, NULL
}
385 #undef TARGET_DOCUMENTATION_NAME
386 #define TARGET_DOCUMENTATION_NAME "m68k"
388 struct gcc_target targetm
= TARGET_INITIALIZER
;
390 /* Base flags for 68k ISAs. */
391 #define FL_FOR_isa_00 FL_ISA_68000
392 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
393 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
394 generated 68881 code for 68020 and 68030 targets unless explicitly told not to.
396 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
397 | FL_BITFIELD | FL_68881 | FL_CAS)
398 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
399 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
401 /* Base flags for ColdFire ISAs. */
402 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
403 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
404 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
405 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
406 /* ISA_C is not upwardly compatible with ISA_B. */
407 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
411 /* Traditional 68000 instruction sets. */
417 /* ColdFire instruction set variants. */
425 /* Information about one of the -march, -mcpu or -mtune arguments. */
426 struct m68k_target_selection
428 /* The argument being described. */
431 /* For -mcpu, this is the device selected by the option.
432 For -mtune and -march, it is a representative device
433 for the microarchitecture or ISA respectively. */
434 enum target_device device
;
436 /* The M68K_DEVICE fields associated with DEVICE. See the comment
437 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
439 enum uarch_type microarch
;
444 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
445 static const struct m68k_target_selection all_devices
[] =
447 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
448 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
449 #include "m68k-devices.def"
451 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
454 /* A list of all ISAs, mapping each one to a representative device.
455 Used for -march selection. */
456 static const struct m68k_target_selection all_isas
[] =
458 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
459 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
460 #include "m68k-isas.def"
462 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
465 /* A list of all microarchitectures, mapping each one to a representative
466 device. Used for -mtune selection. */
467 static const struct m68k_target_selection all_microarchs
[] =
469 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
470 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
471 #include "m68k-microarchs.def"
472 #undef M68K_MICROARCH
473 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
476 /* The entries associated with the -mcpu, -march and -mtune settings,
477 or null for options that have not been used. */
478 const struct m68k_target_selection
*m68k_cpu_entry
;
479 const struct m68k_target_selection
*m68k_arch_entry
;
480 const struct m68k_target_selection
*m68k_tune_entry
;
482 /* Which CPU we are generating code for. */
483 enum target_device m68k_cpu
;
485 /* Which microarchitecture to tune for. */
486 enum uarch_type m68k_tune
;
488 /* Which FPU to use. */
489 enum fpu_type m68k_fpu
;
491 /* The set of FL_* flags that apply to the target processor. */
492 unsigned int m68k_cpu_flags
;
494 /* The set of FL_* flags that apply to the processor to be tuned for. */
495 unsigned int m68k_tune_flags
;
497 /* Asm templates for calling or jumping to an arbitrary symbolic address,
498 or NULL if such calls or jumps are not supported. The address is held
500 const char *m68k_symbolic_call
;
501 const char *m68k_symbolic_jump
;
503 /* Enum variable that corresponds to m68k_symbolic_call values. */
504 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var
;
507 /* Implement TARGET_OPTION_OVERRIDE. */
510 m68k_option_override (void)
512 const struct m68k_target_selection
*entry
;
513 unsigned long target_mask
;
515 if (OPTION_SET_P (m68k_arch_option
))
516 m68k_arch_entry
= &all_isas
[m68k_arch_option
];
518 if (OPTION_SET_P (m68k_cpu_option
))
519 m68k_cpu_entry
= &all_devices
[(int) m68k_cpu_option
];
521 if (OPTION_SET_P (m68k_tune_option
))
522 m68k_tune_entry
= &all_microarchs
[(int) m68k_tune_option
];
530 -march=ARCH should generate code that runs any processor
531 implementing architecture ARCH. -mcpu=CPU should override -march
532 and should generate code that runs on processor CPU, making free
533 use of any instructions that CPU understands. -mtune=UARCH applies
534 on top of -mcpu or -march and optimizes the code for UARCH. It does
535 not change the target architecture. */
538 /* Complain if the -march setting is for a different microarchitecture,
539 or includes flags that the -mcpu setting doesn't. */
541 && (m68k_arch_entry
->microarch
!= m68k_cpu_entry
->microarch
542 || (m68k_arch_entry
->flags
& ~m68k_cpu_entry
->flags
) != 0))
543 warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
544 m68k_cpu_entry
->name
, m68k_arch_entry
->name
);
546 entry
= m68k_cpu_entry
;
549 entry
= m68k_arch_entry
;
552 entry
= all_devices
+ TARGET_CPU_DEFAULT
;
554 m68k_cpu_flags
= entry
->flags
;
556 /* Use the architecture setting to derive default values for
560 /* ColdFire is lenient about alignment. */
561 if (!TARGET_COLDFIRE
)
562 target_mask
|= MASK_STRICT_ALIGNMENT
;
564 if ((m68k_cpu_flags
& FL_BITFIELD
) != 0)
565 target_mask
|= MASK_BITFIELD
;
566 if ((m68k_cpu_flags
& FL_CF_HWDIV
) != 0)
567 target_mask
|= MASK_CF_HWDIV
;
568 if ((m68k_cpu_flags
& (FL_68881
| FL_CF_FPU
)) != 0)
569 target_mask
|= MASK_HARD_FLOAT
;
570 target_flags
|= target_mask
& ~target_flags_explicit
;
572 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
573 m68k_cpu
= entry
->device
;
576 m68k_tune
= m68k_tune_entry
->microarch
;
577 m68k_tune_flags
= m68k_tune_entry
->flags
;
579 #ifdef M68K_DEFAULT_TUNE
580 else if (!m68k_cpu_entry
&& !m68k_arch_entry
)
582 enum target_device dev
;
583 dev
= all_microarchs
[M68K_DEFAULT_TUNE
].device
;
584 m68k_tune_flags
= all_devices
[dev
].flags
;
589 m68k_tune
= entry
->microarch
;
590 m68k_tune_flags
= entry
->flags
;
593 /* Set the type of FPU. */
594 m68k_fpu
= (!TARGET_HARD_FLOAT
? FPUTYPE_NONE
595 : (m68k_cpu_flags
& FL_COLDFIRE
) != 0 ? FPUTYPE_COLDFIRE
598 /* Sanity check to ensure that msep-data and mid-shared-library are not
599 * both specified together. Doing so simply doesn't make sense.
601 if (TARGET_SEP_DATA
&& TARGET_ID_SHARED_LIBRARY
)
602 error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");
604 /* If we're generating code for a separate A5 relative data segment,
605 * we've got to enable -fPIC as well. This might be relaxable to
606 * -fpic but it hasn't been tested properly.
608 if (TARGET_SEP_DATA
|| TARGET_ID_SHARED_LIBRARY
)
611 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
612 error if the target does not support them. */
613 if (TARGET_PCREL
&& !TARGET_68020
&& flag_pic
== 2)
614 error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu");
616 /* ??? A historic way of turning on pic, or is this intended to
617 be an embedded thing that doesn't have the same name binding
618 significance that it does on hosted ELF systems? */
619 if (TARGET_PCREL
&& flag_pic
== 0)
624 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_JSR
;
626 m68k_symbolic_jump
= "jra %a0";
628 else if (TARGET_ID_SHARED_LIBRARY
)
629 /* All addresses must be loaded from the GOT. */
631 else if (TARGET_68020
|| TARGET_ISAB
|| TARGET_ISAC
)
634 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_C
;
636 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_P
;
639 /* No unconditional long branch */;
640 else if (TARGET_PCREL
)
641 m68k_symbolic_jump
= "bra%.l %c0";
643 m68k_symbolic_jump
= "bra%.l %p0";
644 /* Turn off function cse if we are doing PIC. We always want
645 function call to be done as `bsr foo@PLTPC'. */
646 /* ??? It's traditional to do this for -mpcrel too, but it isn't
647 clear how intentional that is. */
648 flag_no_function_cse
= 1;
651 switch (m68k_symbolic_call_var
)
653 case M68K_SYMBOLIC_CALL_JSR
:
654 m68k_symbolic_call
= "jsr %a0";
657 case M68K_SYMBOLIC_CALL_BSR_C
:
658 m68k_symbolic_call
= "bsr%.l %c0";
661 case M68K_SYMBOLIC_CALL_BSR_P
:
662 m68k_symbolic_call
= "bsr%.l %p0";
665 case M68K_SYMBOLIC_CALL_NONE
:
666 gcc_assert (m68k_symbolic_call
== NULL
);
673 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
674 parse_alignment_opts ();
675 int label_alignment
= align_labels
.levels
[0].get_value ();
676 if (label_alignment
> 2)
678 warning (0, "%<-falign-labels=%d%> is not supported", label_alignment
);
679 str_align_labels
= "1";
682 int loop_alignment
= align_loops
.levels
[0].get_value ();
683 if (loop_alignment
> 2)
685 warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment
);
686 str_align_loops
= "1";
690 if ((opt_fstack_limit_symbol_arg
!= NULL
|| opt_fstack_limit_register_no
>= 0)
693 warning (0, "%<-fstack-limit-%> options are not supported on this cpu");
694 opt_fstack_limit_symbol_arg
= NULL
;
695 opt_fstack_limit_register_no
= -1;
698 SUBTARGET_OVERRIDE_OPTIONS
;
700 /* Setup scheduling options. */
702 m68k_sched_cpu
= CPU_CFV1
;
704 m68k_sched_cpu
= CPU_CFV2
;
706 m68k_sched_cpu
= CPU_CFV3
;
708 m68k_sched_cpu
= CPU_CFV4
;
711 m68k_sched_cpu
= CPU_UNKNOWN
;
712 flag_schedule_insns
= 0;
713 flag_schedule_insns_after_reload
= 0;
714 flag_modulo_sched
= 0;
715 flag_live_range_shrinkage
= 0;
718 if (m68k_sched_cpu
!= CPU_UNKNOWN
)
720 if ((m68k_cpu_flags
& (FL_CF_EMAC
| FL_CF_EMAC_B
)) != 0)
721 m68k_sched_mac
= MAC_CF_EMAC
;
722 else if ((m68k_cpu_flags
& FL_CF_MAC
) != 0)
723 m68k_sched_mac
= MAC_CF_MAC
;
725 m68k_sched_mac
= MAC_NO
;
729 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE.  When no scheduling
   model is available for the selected CPU (m68k_sched_cpu == CPU_UNKNOWN),
   force every insn-scheduling-related optimization off, mirroring the
   identical flag clearing done at the end of m68k_option_override.  */
732 m68k_override_options_after_change (void)
734 if (m68k_sched_cpu
== CPU_UNKNOWN
)
/* No DFA description for this CPU: disable all scheduling passes.  */
736 flag_schedule_insns
= 0;
737 flag_schedule_insns_after_reload
= 0;
738 flag_modulo_sched
= 0;
739 flag_live_range_shrinkage
= 0;
743 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
744 given argument and NAME is the argument passed to -mcpu. Return NULL
745 if -mcpu was not passed. */
748 m68k_cpp_cpu_ident (const char *prefix
)
752 return concat ("__m", prefix
, "_cpu_", m68k_cpu_entry
->name
, NULL
);
755 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
756 given argument and NAME is the name of the representative device for
757 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
760 m68k_cpp_cpu_family (const char *prefix
)
764 return concat ("__m", prefix
, "_family_", m68k_cpu_entry
->family
, NULL
);
767 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
768 "interrupt_handler" attribute and interrupt_thread if FUNC has an
769 "interrupt_thread" attribute. Otherwise, return
770 m68k_fk_normal_function. */
772 enum m68k_function_kind
773 m68k_get_function_kind (tree func
)
777 gcc_assert (TREE_CODE (func
) == FUNCTION_DECL
);
779 a
= lookup_attribute ("interrupt", DECL_ATTRIBUTES (func
));
781 return m68k_fk_interrupt_handler
;
783 a
= lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func
));
785 return m68k_fk_interrupt_handler
;
787 a
= lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func
));
789 return m68k_fk_interrupt_thread
;
791 return m68k_fk_normal_function
;
794 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
795 struct attribute_spec.handler. */
797 m68k_handle_fndecl_attribute (tree
*node
, tree name
,
798 tree args ATTRIBUTE_UNUSED
,
799 int flags ATTRIBUTE_UNUSED
,
802 if (TREE_CODE (*node
) != FUNCTION_DECL
)
804 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
806 *no_add_attrs
= true;
809 if (m68k_get_function_kind (*node
) != m68k_fk_normal_function
)
811 error ("multiple interrupt attributes not allowed");
812 *no_add_attrs
= true;
816 && !strcmp (IDENTIFIER_POINTER (name
), "interrupt_thread"))
818 error ("%<interrupt_thread%> is available only on fido");
819 *no_add_attrs
= true;
826 m68k_compute_frame_layout (void)
830 enum m68k_function_kind func_kind
=
831 m68k_get_function_kind (current_function_decl
);
832 bool interrupt_handler
= func_kind
== m68k_fk_interrupt_handler
;
833 bool interrupt_thread
= func_kind
== m68k_fk_interrupt_thread
;
835 /* Only compute the frame once per function.
836 Don't cache information until reload has been completed. */
837 if (current_frame
.funcdef_no
== current_function_funcdef_no
841 current_frame
.size
= (get_frame_size () + 3) & -4;
845 /* Interrupt thread does not need to save any register. */
846 if (!interrupt_thread
)
847 for (regno
= 0; regno
< 16; regno
++)
848 if (m68k_save_reg (regno
, interrupt_handler
))
850 mask
|= 1 << (regno
- D0_REG
);
853 current_frame
.offset
= saved
* 4;
854 current_frame
.reg_no
= saved
;
855 current_frame
.reg_mask
= mask
;
857 current_frame
.foffset
= 0;
859 if (TARGET_HARD_FLOAT
)
861 /* Interrupt thread does not need to save any register. */
862 if (!interrupt_thread
)
863 for (regno
= 16; regno
< 24; regno
++)
864 if (m68k_save_reg (regno
, interrupt_handler
))
866 mask
|= 1 << (regno
- FP0_REG
);
869 current_frame
.foffset
= saved
* TARGET_FP_REG_SIZE
;
870 current_frame
.offset
+= current_frame
.foffset
;
872 current_frame
.fpu_no
= saved
;
873 current_frame
.fpu_mask
= mask
;
875 /* Remember what function this frame refers to. */
876 current_frame
.funcdef_no
= current_function_funcdef_no
;
879 /* Worker function for TARGET_CAN_ELIMINATE.  Elimination into the stack
   pointer is only possible when the function needs no frame pointer;
   elimination to any other register (i.e. the frame pointer) is always
   allowed.  */
882 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
884 return (to
== STACK_POINTER_REGNUM
? ! frame_pointer_needed
: true);
888 m68k_initial_elimination_offset (int from
, int to
)
891 /* The arg pointer points 8 bytes before the start of the arguments,
892 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
893 frame pointer in most frames. */
894 argptr_offset
= frame_pointer_needed
? 0 : UNITS_PER_WORD
;
895 if (from
== ARG_POINTER_REGNUM
&& to
== FRAME_POINTER_REGNUM
)
896 return argptr_offset
;
898 m68k_compute_frame_layout ();
900 gcc_assert (to
== STACK_POINTER_REGNUM
);
903 case ARG_POINTER_REGNUM
:
904 return current_frame
.offset
+ current_frame
.size
- argptr_offset
;
905 case FRAME_POINTER_REGNUM
:
906 return current_frame
.offset
+ current_frame
.size
;
912 /* Refer to the array `regs_ever_live' to determine which registers
913 to save; `regs_ever_live[I]' is nonzero if register number I
914 is ever used in the function. This function is responsible for
915 knowing which registers should not be saved even if used.
916 Return true if we need to save REGNO. */
919 m68k_save_reg (unsigned int regno
, bool interrupt_handler
)
921 if (flag_pic
&& regno
== PIC_REG
)
923 if (crtl
->saves_all_registers
)
925 if (crtl
->uses_pic_offset_table
)
927 /* Reload may introduce constant pool references into a function
928 that thitherto didn't need a PIC register. Note that the test
929 above will not catch that case because we will only set
930 crtl->uses_pic_offset_table when emitting
931 the address reloads. */
932 if (crtl
->uses_const_pool
)
936 if (crtl
->calls_eh_return
)
941 unsigned int test
= EH_RETURN_DATA_REGNO (i
);
942 if (test
== INVALID_REGNUM
)
949 /* Fixed regs we never touch. */
950 if (fixed_regs
[regno
])
953 /* The frame pointer (if it is such) is handled specially. */
954 if (regno
== FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
957 /* Interrupt handlers must also save call_used_regs
958 if they are live or when calling nested functions. */
959 if (interrupt_handler
)
961 if (df_regs_ever_live_p (regno
))
964 if (!crtl
->is_leaf
&& call_used_or_fixed_reg_p (regno
))
968 /* Never need to save registers that aren't touched. */
969 if (!df_regs_ever_live_p (regno
))
972 /* Otherwise save everything that isn't call-clobbered. */
973 return !call_used_or_fixed_reg_p (regno
);
976 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
977 the lowest memory address. COUNT is the number of registers to be
978 moved, with register REGNO + I being moved if bit I of MASK is set.
979 STORE_P specifies the direction of the move and ADJUST_STACK_P says
980 whether or not this is pre-decrement (if STORE_P) or post-increment
981 (if !STORE_P) operation. */
984 m68k_emit_movem (rtx base
, HOST_WIDE_INT offset
,
985 unsigned int count
, unsigned int regno
,
986 unsigned int mask
, bool store_p
, bool adjust_stack_p
)
989 rtx body
, addr
, src
, operands
[2];
992 body
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (adjust_stack_p
+ count
));
993 mode
= reg_raw_mode
[regno
];
998 src
= plus_constant (Pmode
, base
,
1000 * GET_MODE_SIZE (mode
)
1001 * (HOST_WIDE_INT
) (store_p
? -1 : 1)));
1002 XVECEXP (body
, 0, i
++) = gen_rtx_SET (base
, src
);
1005 for (; mask
!= 0; mask
>>= 1, regno
++)
1008 addr
= plus_constant (Pmode
, base
, offset
);
1009 operands
[!store_p
] = gen_frame_mem (mode
, addr
);
1010 operands
[store_p
] = gen_rtx_REG (mode
, regno
);
1011 XVECEXP (body
, 0, i
++)
1012 = gen_rtx_SET (operands
[0], operands
[1]);
1013 offset
+= GET_MODE_SIZE (mode
);
1015 gcc_assert (i
== XVECLEN (body
, 0));
1017 return emit_insn (body
);
1020 /* Make INSN a frame-related instruction: set RTX_FRAME_RELATED_P on INSN
   itself and, when its pattern is a PARALLEL (as emitted by e.g.
   m68k_emit_movem), on every element of the PARALLEL as well —
   presumably so the DWARF CFI machinery records each component of a
   multi-register save (NOTE(review): confirm against dwarf2out).  */
1023 m68k_set_frame_related (rtx_insn
*insn
)
1028 RTX_FRAME_RELATED_P (insn
) = 1;
1029 body
= PATTERN (insn
);
1030 if (GET_CODE (body
) == PARALLEL
)
1031 for (i
= 0; i
< XVECLEN (body
, 0); i
++)
1032 RTX_FRAME_RELATED_P (XVECEXP (body
, 0, i
)) = 1;
1035 /* Emit RTL for the "prologue" define_expand. */
1038 m68k_expand_prologue (void)
1040 HOST_WIDE_INT fsize_with_regs
;
1041 rtx limit
, src
, dest
;
1043 m68k_compute_frame_layout ();
1045 if (flag_stack_usage_info
)
1046 current_function_static_stack_size
1047 = current_frame
.size
+ current_frame
.offset
;
1049 /* If the stack limit is a symbol, we can check it here,
1050 before actually allocating the space. */
1051 if (crtl
->limit_stack
1052 && GET_CODE (stack_limit_rtx
) == SYMBOL_REF
)
1054 limit
= plus_constant (Pmode
, stack_limit_rtx
, current_frame
.size
+ 4);
1055 if (!m68k_legitimate_constant_p (Pmode
, limit
))
1057 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), limit
);
1058 limit
= gen_rtx_REG (Pmode
, D0_REG
);
1060 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
,
1061 stack_pointer_rtx
, limit
),
1062 stack_pointer_rtx
, limit
,
1066 fsize_with_regs
= current_frame
.size
;
1067 if (TARGET_COLDFIRE
)
1069 /* ColdFire's move multiple instructions do not allow pre-decrement
1070 addressing. Add the size of movem saves to the initial stack
1071 allocation instead. */
1072 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
1073 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1074 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1075 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1078 if (frame_pointer_needed
)
1080 if (fsize_with_regs
== 0 && TUNE_68040
)
1082 /* On the 68040, two separate moves are faster than link.w 0. */
1083 dest
= gen_frame_mem (Pmode
,
1084 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1085 m68k_set_frame_related (emit_move_insn (dest
, frame_pointer_rtx
));
1086 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx
,
1087 stack_pointer_rtx
));
1089 else if (fsize_with_regs
< 0x8000 || TARGET_68020
)
1090 m68k_set_frame_related
1091 (emit_insn (gen_link (frame_pointer_rtx
,
1092 GEN_INT (-4 - fsize_with_regs
))));
1095 m68k_set_frame_related
1096 (emit_insn (gen_link (frame_pointer_rtx
, GEN_INT (-4))));
1097 m68k_set_frame_related
1098 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1100 GEN_INT (-fsize_with_regs
))));
1103 /* If the frame pointer is needed, emit a special barrier that
1104 will prevent the scheduler from moving stores to the frame
1105 before the stack adjustment. */
1106 emit_insn (gen_stack_tie (stack_pointer_rtx
, frame_pointer_rtx
));
1108 else if (fsize_with_regs
!= 0)
1109 m68k_set_frame_related
1110 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1112 GEN_INT (-fsize_with_regs
))));
1114 if (current_frame
.fpu_mask
)
1116 gcc_assert (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
);
1118 m68k_set_frame_related
1119 (m68k_emit_movem (stack_pointer_rtx
,
1120 current_frame
.fpu_no
* -GET_MODE_SIZE (XFmode
),
1121 current_frame
.fpu_no
, FP0_REG
,
1122 current_frame
.fpu_mask
, true, true));
1127 /* If we're using moveml to save the integer registers,
1128 the stack pointer will point to the bottom of the moveml
1129 save area. Find the stack offset of the first FP register. */
1130 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1133 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1134 m68k_set_frame_related
1135 (m68k_emit_movem (stack_pointer_rtx
, offset
,
1136 current_frame
.fpu_no
, FP0_REG
,
1137 current_frame
.fpu_mask
, true, false));
1141 /* If the stack limit is not a symbol, check it here.
1142 This has the disadvantage that it may be too late... */
1143 if (crtl
->limit_stack
)
1145 if (REG_P (stack_limit_rtx
))
1146 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
, stack_pointer_rtx
,
1148 stack_pointer_rtx
, stack_limit_rtx
,
1151 else if (GET_CODE (stack_limit_rtx
) != SYMBOL_REF
)
1152 warning (0, "stack limit expression is not supported");
1155 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1157 /* Store each register separately in the same order moveml does. */
1160 for (i
= 16; i
-- > 0; )
1161 if (current_frame
.reg_mask
& (1 << i
))
1163 src
= gen_rtx_REG (SImode
, D0_REG
+ i
);
1164 dest
= gen_frame_mem (SImode
,
1165 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1166 m68k_set_frame_related (emit_insn (gen_movsi (dest
, src
)));
1171 if (TARGET_COLDFIRE
)
1172 /* The required register save space has already been allocated.
1173 The first register should be stored at (%sp). */
1174 m68k_set_frame_related
1175 (m68k_emit_movem (stack_pointer_rtx
, 0,
1176 current_frame
.reg_no
, D0_REG
,
1177 current_frame
.reg_mask
, true, false));
1179 m68k_set_frame_related
1180 (m68k_emit_movem (stack_pointer_rtx
,
1181 current_frame
.reg_no
* -GET_MODE_SIZE (SImode
),
1182 current_frame
.reg_no
, D0_REG
,
1183 current_frame
.reg_mask
, true, true));
1186 if (!TARGET_SEP_DATA
1187 && crtl
->uses_pic_offset_table
)
1188 emit_insn (gen_load_got (pic_offset_table_rtx
));
1191 /* Return true if a simple (return) instruction is sufficient for this
1192 instruction (i.e. if no epilogue is needed). */
1195 m68k_use_return_insn (void)
1197 if (!reload_completed
|| frame_pointer_needed
|| get_frame_size () != 0)
1200 m68k_compute_frame_layout ();
1201 return current_frame
.offset
== 0;
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* A (d16,An) restore address only reaches 0x7fff; for bigger frames
     either fold the offset into %a1 up front ("big") or switch to a
     stack-pointer-relative restore.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Materialize the large offset in %a1 and use (d8,%fp,%a1.l)
	     addressing below.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	/* On ColdFire the moveml cannot post-increment; the stack is
	   deallocated afterwards via fsize_with_regs instead.  */
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving frame loads past the deallocation.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1376 /* Return true if PARALLEL contains register REGNO. */
1378 m68k_reg_present_p (const_rtx parallel
, unsigned int regno
)
1382 if (REG_P (parallel
) && REGNO (parallel
) == regno
)
1385 if (GET_CODE (parallel
) != PARALLEL
)
1388 for (i
= 0; i
< XVECLEN (parallel
, 0); ++i
)
1392 x
= XEXP (XVECEXP (parallel
, 0, i
), 0);
1393 if (REG_P (x
) && REGNO (x
) == regno
)
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}
/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t, const function_arg_info &)
{
  /* Returning NULL_RTX tells the middle-end that no argument is ever
     passed in a register: everything goes on the stack.  */
  return NULL_RTX;
}
1455 m68k_function_arg_advance (cumulative_args_t cum_v
,
1456 const function_arg_info
&arg
)
1458 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1460 *cum
+= (arg
.promoted_size_in_bytes () + 3) & ~3;
1463 /* Convert X to a legitimate function call memory reference and return the
1467 m68k_legitimize_call_address (rtx x
)
1469 gcc_assert (MEM_P (x
));
1470 if (call_operand (XEXP (x
, 0), VOIDmode
))
1472 return replace_equiv_address (x
, force_reg (Pmode
, XEXP (x
, 0)));
1475 /* Likewise for sibling calls. */
1478 m68k_legitimize_sibcall_address (rtx x
)
1480 gcc_assert (MEM_P (x
));
1481 if (sibcall_operand (XEXP (x
, 0), VOIDmode
))
1484 emit_move_insn (gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
), XEXP (x
, 0));
1485 return replace_equiv_address (x
, gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
));
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X has (possibly) been changed; COPIED ensures
	 we copy the rtx at most once before mutating it in place.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out a multiplication/shift in either operand so that the
	 remaining operand can go into an address register.  */
      if (GET_CODE (XEXP (x, 0)) == MULT
	  || GET_CODE (XEXP (x, 0)) == ASHIFT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT
	  || GET_CODE (XEXP (x, 1)) == ASHIFT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG is fine, except that the ColdFire FPU cannot use
		 indexed addressing for float operands.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}

      /* One operand is a (possibly sign-extended HImode) register; load
	 the other operand into a fresh register and index off it.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
/* For eliding comparisons, we remember how the flags were set.
   FLAGS_COMPARE_OP0 and FLAGS_COMPARE_OP1 are remembered for a direct
   comparison, they take priority.  FLAGS_OPERAND1 and FLAGS_OPERAND2
   are used in more cases, they are a fallback for comparisons against
   zero after a move or arithmetic insn.
   FLAGS_VALID is set to FLAGS_VALID_NO if we should not use any of
   these values.  */

static rtx flags_compare_op0, flags_compare_op1;
static rtx flags_operand1, flags_operand2;
/* How the remembered operands relate to the CC register; see the
   flags_valid insn attribute in m68k.md.  */
static attr_flags_valid flags_valid = FLAGS_VALID_NO;
/* Return a code other than UNKNOWN if we can elide a CODE comparison of
   OP0 with OP1.  The returned code may differ from CODE when the flags
   already reflect a swapped comparison or only N/Z are meaningful
   (PLUS meaning "N clear", MINUS meaning "N set").  */

rtx_code
m68k_find_flags_value (rtx op0, rtx op1, rtx_code code)
{
  /* A remembered direct comparison takes priority.  */
  if (flags_compare_op0 != NULL_RTX || flags_compare_op1 != NULL_RTX)
    {
      if (rtx_equal_p (op0, flags_compare_op0)
	  && rtx_equal_p (op1, flags_compare_op1))
	return code;
      if (rtx_equal_p (op0, flags_compare_op1)
	  && rtx_equal_p (op1, flags_compare_op0))
	return swap_condition (code);
      return UNKNOWN;
    }

  /* Fallback: flags set by a move/arithmetic insn are only usable for
     comparisons against zero.  */
  machine_mode mode = GET_MODE (op0);
  if (flags_valid == FLAGS_VALID_NO || op1 != CONST0_RTX (mode))
    return UNKNOWN;
  /* Comparisons against 0 with these two should have been optimized out.  */
  gcc_assert (code != LTU && code != GEU);
  /* Without a valid overflow flag, signed GT/LE cannot be elided.  */
  if (flags_valid == FLAGS_VALID_NOOV && (code == GT || code == LE))
    return UNKNOWN;
  if (rtx_equal_p (flags_operand1, op0) || rtx_equal_p (flags_operand2, op0))
    return (FLOAT_MODE_P (mode) ? code
	    : code == GE ? PLUS : code == LT ? MINUS : code);
  /* See if we are testing whether the high part of a DImode value is
     positive or negative and we have the full value as a remembered
     operand.  */
  if (code != GE && code != LT)
    return UNKNOWN;
  /* NOTE(review): the SImode guard below reconstructs a line lost in
     extraction -- confirm against the original source.  */
  if (mode == SImode
      && flags_operand1 != NULL_RTX && GET_MODE (flags_operand1) == DImode
      && REG_P (flags_operand1) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand1), DImode) == 2
      && REGNO (flags_operand1) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  if (mode == SImode
      && flags_operand2 != NULL_RTX && GET_MODE (flags_operand2) == DImode
      && REG_P (flags_operand2) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand2), DImode) == 2
      && REGNO (flags_operand2) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  return UNKNOWN;
}
/* Called through CC_STATUS_INIT, which is invoked by final whenever a
   label is encountered.  */

void
m68k_init_cc ()
{
  /* Control flow can merge at a label, so nothing remembered about the
     flags is trustworthy past this point.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  flags_operand1 = flags_operand2 = NULL_RTX;
  flags_valid = FLAGS_VALID_NO;
}
/* Update flags for a move operation with OPERANDS.  Called for move
   operations where attr_flags_valid returns "set".  */

static void
handle_flags_for_move (rtx *operands)
{
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  if (!ADDRESS_REG_P (operands[0]))
    {
      /* A move to a non-address register sets N and Z from the value.  */
      flags_valid = FLAGS_VALID_MOVE;
      flags_operand1 = side_effects_p (operands[0]) ? NULL_RTX : operands[0];
      if (side_effects_p (operands[1])
	  /* ??? For mem->mem moves, this can discard the source as a
	     valid compare operand.  If you assume aligned moves, this
	     is unnecessary, but in theory, we could have an unaligned
	     move overwriting parts of its source.  */
	  || modified_in_p (operands[1], current_output_insn))
	flags_operand2 = NULL_RTX;
      else
	flags_operand2 = operands[1];
      return;
    }

  /* A move into an address register leaves the flags alone, but may
     clobber a remembered operand; invalidate those it overwrites.  */
  if (flags_operand1 != NULL_RTX
      && modified_in_p (flags_operand1, current_output_insn))
    flags_operand1 = NULL_RTX;
  if (flags_operand2 != NULL_RTX
      && modified_in_p (flags_operand2, current_output_insn))
    flags_operand2 = NULL_RTX;
}
/* Process INSN to remember flag operands if possible.  Implements
   TARGET_ASM_FINAL_POSTSCAN_INSN; runs after each insn is output.  */

static void
m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int)
{
  enum attr_flags_valid v = get_attr_flags_valid (insn);
  if (v == FLAGS_VALID_SET)
    /* Comparisons (and moves, via handle_flags_for_move) record their
       own state while being output; nothing more to do here.  */
    return;
  /* Comparisons use FLAGS_VALID_SET, so we can be sure we need to clear these
     now.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;

  if (v == FLAGS_VALID_NO)
    {
      flags_valid = v;
      flags_operand1 = flags_operand2 = NULL_RTX;
      return;
    }
  else if (v == FLAGS_VALID_UNCHANGED)
    {
      /* Flags survive this insn, but remembered operands may have been
	 overwritten.  */
      if (flags_operand1 != NULL_RTX && modified_in_p (flags_operand1, insn))
	flags_operand1 = NULL_RTX;
      if (flags_operand2 != NULL_RTX && modified_in_p (flags_operand2, insn))
	flags_operand2 = NULL_RTX;
      return;
    }

  flags_valid = v;
  rtx set = single_set (insn);
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);
  if (side_effects_p (dest))
    dest = NULL_RTX;

  switch (v)
    {
    case FLAGS_VALID_YES:
    case FLAGS_VALID_NOOV:
      flags_operand1 = dest;
      flags_operand2 = NULL_RTX;
      break;
    case FLAGS_VALID_MOVE:
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      if (dest != NULL_RTX
	  && !FP_REG_P (dest)
	  && (FP_REG_P (src)
	      || GET_CODE (src) == FIX
	      || FLOAT_MODE_P (GET_MODE (dest))))
	flags_operand1 = flags_operand2 = NULL_RTX;
      else
	{
	  flags_operand1 = dest;
	  if (GET_MODE (src) != VOIDmode && !side_effects_p (src)
	      && !modified_in_p (src, insn))
	    flags_operand2 = src;
	  else
	    flags_operand2 = NULL_RTX;
	}
      break;
    default:
      gcc_unreachable ();
    }
}
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).
   OPERANDS are as in the two peepholes.  CODE is the code
   returned by m68k_output_branch_<mode>.  */

void
output_dbcc_and_branch (rtx *operands, rtx_code code)
{
  switch (code)
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    /* NOTE(review): PL and MI emit "jle" rather than the jpl/jmi one
       would expect from the pattern of the other cases -- confirm this
       against the upstream source before changing it.  */
    case PL:
      output_asm_insn ("dbpl %0,%l1\n\tjle %l2", operands);
      break;

    case MI:
      output_asm_insn ("dbmi %0,%l1\n\tjle %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
/* Output assembly to set byte DEST to the result of comparing the DImode
   values OPERAND1 and OPERAND2 with relation OP.  A DImode comparison is
   split into two SImode compares joined by an internal label.  */

void
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* %0/%1 are the high/low words of operand1; %2/%3 likewise for
     operand2; %4 is the join label; %5 the destination byte.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* tst.l does not work on address registers before the 68020.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* For EQ/NE and the unsigned orderings a single sCC after the join
     label suffices; the signed orderings need a different sCC on the
     high-word path, hence the extra label %6.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
}
/* Output a bit-test instruction for bit COUNTOP of DATAOP and return the
   condition code the caller's jump should use.  SIGNPOS is the position
   of the sign bit in the storage unit under test (so COUNT == SIGNPOS
   means the sign bit is being tested).  Returning PLUS/MINUS tells the
   caller to branch on N clear/set; EQ/NE and the incoming CODE retain
   their usual meaning.  */

rtx_code
m68k_output_btst (rtx countop, rtx dataop, rtx_code code, int signpos)
{
  rtx ops[2];
  ops[0] = countop;
  ops[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  ops[1] = dataop = adjust_address (dataop, QImode, offset);
	}

      /* NOTE(review): the exact guards on the three tst cases below were
	 lost in extraction; testing the sign bit of a data register is
	 equivalent to a tst of the matching width -- confirm the guards
	 against the upstream source.  */
      if (code == EQ || code == NE)
	{
	  if (count == 31 && DATA_REG_P (ops[1]))
	    {
	      output_asm_insn ("tst%.l %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 15 && DATA_REG_P (ops[1]))
	    {
	      output_asm_insn ("tst%.w %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 7 && DATA_REG_P (ops[1]))
	    {
	      output_asm_insn ("tst%.b %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	}

      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 2 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? NE : EQ;
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}
    }

  output_asm_insn ("btst %0,%1", ops);
  return code;
}
/* Output a bftst instruction for a zero_extract with ZXOP0, ZXOP1 and ZXOP2
   operands.  CODE is the code of the comparison, and we return the code to
   be actually used in the jump.  */

rtx_code
m68k_output_bftst (rtx zxop0, rtx zxop1, rtx zxop2, rtx_code code)
{
  if (zxop1 == const1_rtx && GET_CODE (zxop2) == CONST_INT)
    {
      /* A one-bit field can use btst; bitfield bit numbering runs from
	 the MSB, so convert the position (31 for registers, 7 for
	 memory bytes).  */
      int width = GET_CODE (zxop0) == REG ? 31 : 7;
      /* Pass 1000 as SIGNPOS argument so that btst will
	 not think we are testing the sign bit for an `and'
	 and assume that nonzero implies a negative result.  */
      return m68k_output_btst (GEN_INT (width - INTVAL (zxop2)),
			       zxop0, code, 1000);
    }

  rtx ops[3] = { zxop0, zxop1, zxop2 };
  output_asm_insn ("bftst %0{%b2:%b1}", ops);
  return code;
}
/* Return true if X is a legitimate base register.  STRICT_P says
   whether we need strict checking.  */

bool
m68k_legitimate_base_reg_p (rtx x, bool strict_p)
{
  /* Allow SUBREG everywhere we allow REG.  This results in better code.  */
  if (!strict_p && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && (strict_p
	      ? REGNO_OK_FOR_BASE_P (REGNO (x))
	      : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
}
/* Return true if X is a legitimate index register.  STRICT_P says
   whether we need strict checking.  */

bool
m68k_legitimate_index_reg_p (rtx x, bool strict_p)
{
  /* As for base registers, allow SUBREG of a REG before register
     allocation has assigned hard registers.  */
  if (!strict_p && GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  return (REG_P (x)
	  && (strict_p
	      ? REGNO_OK_FOR_INDEX_P (REGNO (x))
	      : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
}
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  */
  scale = 1;
  if (TARGET_68020 || TARGET_COLDFIRE)
    {
      /* Scale 8 is unavailable on plain ColdFire without an FPU.  */
      if (GET_CODE (x) == MULT
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 2
	      || INTVAL (XEXP (x, 1)) == 4
	      || (INTVAL (XEXP (x, 1)) == 8
		  && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
	{
	  scale = INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
      /* LRA uses ASHIFT instead of MULT outside of MEM.  */
      else if (GET_CODE (x) == ASHIFT
	       && GET_CODE (XEXP (x, 1)) == CONST_INT
	       && (INTVAL (XEXP (x, 1)) == 1
		   || INTVAL (XEXP (x, 1)) == 2
		   || (INTVAL (XEXP (x, 1)) == 3
		       && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
	{
	  scale = 1 << INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
    }

  /* Check for a word extension.  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
/* Return true if X is an illegitimate symbolic constant.  */

bool
m68k_illegitimate_symbolic_constant_p (rtx x)
{
  rtx base, offset;

  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
    {
      /* SYMBOL+OFFSET is only usable if OFFSET stays inside the
	 symbol's own object.  */
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset)))
	return true;
    }
  /* TLS references always need legitimizing.  */
  return m68k_tls_reference_p (x, false);
}
/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  /* Constants we cannot legitimately reference also cannot be forced
     into the constant pool.  */
  return m68k_illegitimate_symbolic_constant_p (x);
}
/* Return true if X is a legitimate constant address that can reach
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
   strict checking.  */

static bool
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
{
  rtx base, offset;

  if (!CONSTANT_ADDRESS_P (x))
    return false;

  /* Under PIC, bare symbolic addresses are not directly addressable
     (they need a GOT access), except for PC-relative code after reload.  */
  if (flag_pic
      && !(strict_p && TARGET_PCREL)
      && symbolic_operand (x, VOIDmode))
    return false;

  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
    {
      /* The whole accessed range must stay within the symbol's block.  */
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
	return false;
    }

  return !m68k_tls_reference_p (x, false);
}
2171 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
2172 labels will become jump tables. */
2175 m68k_jump_table_ref_p (rtx x
)
2177 if (GET_CODE (x
) != LABEL_REF
)
2180 rtx_insn
*insn
= as_a
<rtx_insn
*> (XEXP (x
, 0));
2181 if (!NEXT_INSN (insn
) && !PREV_INSN (insn
))
2184 insn
= next_nonnote_insn (insn
);
2185 return insn
&& JUMP_TABLE_DATA_P (insn
);
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  Do not accept it in
     PIC mode, since the label address will need to be loaded from memory.  */
  if (GET_CODE (x) == PLUS
      && !flag_pic
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }

  return false;
}
2339 /* Return true if X is a legitimate address for values of mode MODE.
2340 STRICT_P says whether strict checking is needed. */
2343 m68k_legitimate_address_p (machine_mode mode
, rtx x
, bool strict_p
, code_helper
)
2345 struct m68k_address address
;
2347 return m68k_decompose_address (mode
, x
, strict_p
, &address
);
2350 /* Return true if X is a memory, describing its address in ADDRESS if so.
2351 Apply strict checking if called during or after reload. */
2354 m68k_legitimate_mem_p (rtx x
, struct m68k_address
*address
)
2357 && m68k_decompose_address (GET_MODE (x
), XEXP (x
, 0),
2358 (reload_in_progress
|| lra_in_progress
2359 || reload_completed
),
2363 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2366 m68k_legitimate_constant_p (machine_mode mode
, rtx x
)
2368 return mode
!= XFmode
&& !m68k_illegitimate_symbolic_constant_p (x
);
2371 /* Return true if X matches the 'Q' constraint. It must be a memory
2372 with a base address and no constant offset or index. */
2375 m68k_matches_q_p (rtx x
)
2377 struct m68k_address address
;
2379 return (m68k_legitimate_mem_p (x
, &address
)
2380 && address
.code
== UNKNOWN
2386 /* Return true if X matches the 'U' constraint. It must be a base address
2387 with a constant offset and no index. */
2390 m68k_matches_u_p (rtx x
)
2392 struct m68k_address address
;
2394 return (m68k_legitimate_mem_p (x
, &address
)
2395 && address
.code
== UNKNOWN
2401 /* Return GOT pointer. */
2406 if (pic_offset_table_rtx
== NULL_RTX
)
2407 pic_offset_table_rtx
= gen_rtx_REG (Pmode
, PIC_REG
);
2409 crtl
->uses_pic_offset_table
= 1;
2411 return pic_offset_table_rtx
;
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* Any relocation other than a plain GOT reference is a TLS relocation.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2421 /* Wrap symbol X into unspec representing relocation RELOC.
2422 BASE_REG - register that should be added to the result.
2423 TEMP_REG - if non-null, temporary register. */
2426 m68k_wrap_symbol (rtx x
, enum m68k_reloc reloc
, rtx base_reg
, rtx temp_reg
)
2430 use_x_p
= (base_reg
== pic_offset_table_rtx
) ? TARGET_XGOT
: TARGET_XTLS
;
2432 if (TARGET_COLDFIRE
&& use_x_p
)
2433 /* When compiling with -mx{got, tls} switch the code will look like this:
2435 move.l <X>@<RELOC>,<TEMP_REG>
2436 add.l <BASE_REG>,<TEMP_REG> */
2438 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2439 to put @RELOC after reference. */
2440 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2442 x
= gen_rtx_CONST (Pmode
, x
);
2444 if (temp_reg
== NULL
)
2446 gcc_assert (can_create_pseudo_p ());
2447 temp_reg
= gen_reg_rtx (Pmode
);
2450 emit_move_insn (temp_reg
, x
);
2451 emit_insn (gen_addsi3 (temp_reg
, temp_reg
, base_reg
));
2456 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2458 x
= gen_rtx_CONST (Pmode
, x
);
2460 x
= gen_rtx_PLUS (Pmode
, base_reg
, x
);
2466 /* Helper for m68k_unwrap_symbol.
2467 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2468 sets *RELOC_PTR to relocation type for the symbol. */
2471 m68k_unwrap_symbol_1 (rtx orig
, bool unwrap_reloc32_p
,
2472 enum m68k_reloc
*reloc_ptr
)
2474 if (GET_CODE (orig
) == CONST
)
2477 enum m68k_reloc dummy
;
2481 if (reloc_ptr
== NULL
)
2484 /* Handle an addend. */
2485 if ((GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
)
2486 && CONST_INT_P (XEXP (x
, 1)))
2489 if (GET_CODE (x
) == UNSPEC
)
2491 switch (XINT (x
, 1))
2493 case UNSPEC_RELOC16
:
2494 orig
= XVECEXP (x
, 0, 0);
2495 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2498 case UNSPEC_RELOC32
:
2499 if (unwrap_reloc32_p
)
2501 orig
= XVECEXP (x
, 0, 0);
2502 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2515 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2516 UNSPEC_RELOC32 wrappers. */
2519 m68k_unwrap_symbol (rtx orig
, bool unwrap_reloc32_p
)
2521 return m68k_unwrap_symbol_1 (orig
, unwrap_reloc32_p
, NULL
);
2524 /* Adjust decorated address operand before outputing assembler for it. */
2527 m68k_adjust_decorated_operand (rtx op
)
2529 /* Combine and, possibly, other optimizations may do good job
2531 (const (unspec [(symbol)]))
2533 (const (plus (unspec [(symbol)])
2535 The problem with this is emitting @TLS or @GOT decorations.
2536 The decoration is emitted when processing (unspec), so the
2537 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2539 It seems that the easiest solution to this is to convert such
2541 (const (unspec [(plus (symbol)
2543 Note, that the top level of operand remains intact, so we don't have
2544 to patch up anything outside of the operand. */
2546 subrtx_var_iterator::array_type array
;
2547 FOR_EACH_SUBRTX_VAR (iter
, array
, op
, ALL
)
2550 if (m68k_unwrap_symbol (x
, true) != x
)
2554 gcc_assert (GET_CODE (x
) == CONST
);
2557 if (GET_CODE (plus
) == PLUS
|| GET_CODE (plus
) == MINUS
)
2562 unspec
= XEXP (plus
, 0);
2563 gcc_assert (GET_CODE (unspec
) == UNSPEC
);
2564 addend
= XEXP (plus
, 1);
2565 gcc_assert (CONST_INT_P (addend
));
2567 /* We now have all the pieces, rearrange them. */
2569 /* Move symbol to plus. */
2570 XEXP (plus
, 0) = XVECEXP (unspec
, 0, 0);
2572 /* Move plus inside unspec. */
2573 XVECEXP (unspec
, 0, 0) = plus
;
2575 /* Move unspec to top level of const. */
2576 XEXP (x
, 0) = unspec
;
2578 iter
.skip_subrtxes ();
2583 /* Prescan insn before outputing assembler for it. */
2586 m68k_final_prescan_insn (rtx_insn
*insn ATTRIBUTE_UNUSED
,
2587 rtx
*operands
, int n_operands
)
2591 for (i
= 0; i
< n_operands
; ++i
)
2592 m68k_adjust_decorated_operand (operands
[i
]);
2595 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2596 If REG is non-null, use it; generate new pseudo otherwise. */
2599 m68k_move_to_reg (rtx x
, rtx orig
, rtx reg
)
2603 if (reg
== NULL_RTX
)
2605 gcc_assert (can_create_pseudo_p ());
2606 reg
= gen_reg_rtx (Pmode
);
2609 insn
= emit_move_insn (reg
, x
);
2610 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2612 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
2617 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2621 m68k_wrap_symbol_into_got_ref (rtx x
, enum m68k_reloc reloc
, rtx temp_reg
)
2623 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), temp_reg
);
2625 x
= gen_rtx_MEM (Pmode
, x
);
2626 MEM_READONLY_P (x
) = 1;
2631 /* Legitimize PIC addresses. If the address is already
2632 position-independent, we return ORIG. Newly generated
2633 position-independent addresses go to REG. If we need more
2634 than one register, we lose.
2636 An address is legitimized by making an indirect reference
2637 through the Global Offset Table with the name of the symbol
2640 The assembler and linker are responsible for placing the
2641 address of the symbol in the GOT. The function prologue
2642 is responsible for initializing a5 to the starting address
2645 The assembler is also responsible for translating a symbol name
2646 into a constant displacement from the start of the GOT.
2648 A quick example may make things a little clearer:
2650 When not generating PIC code to store the value 12345 into _foo
2651 we would generate the following code:
2655 When generating PIC two transformations are made. First, the compiler
2656 loads the address of foo into a register. So the first transformation makes:
2661 The code in movsi will intercept the lea instruction and call this
2662 routine which will transform the instructions into:
2664 movel a5@(_foo:w), a0
2668 That (in a nutshell) is how *all* symbol and label references are
2672 legitimize_pic_address (rtx orig
, machine_mode mode ATTRIBUTE_UNUSED
,
2677 /* First handle a simple SYMBOL_REF or LABEL_REF */
2678 if (GET_CODE (orig
) == SYMBOL_REF
|| GET_CODE (orig
) == LABEL_REF
)
2682 pic_ref
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_GOT
, reg
);
2683 pic_ref
= m68k_move_to_reg (pic_ref
, orig
, reg
);
2685 else if (GET_CODE (orig
) == CONST
)
2689 /* Make sure this has not already been legitimized. */
2690 if (m68k_unwrap_symbol (orig
, true) != orig
)
2695 /* legitimize both operands of the PLUS */
2696 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
2698 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
2699 orig
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
2700 base
== reg
? 0 : reg
);
2702 if (GET_CODE (orig
) == CONST_INT
)
2703 pic_ref
= plus_constant (Pmode
, base
, INTVAL (orig
));
2705 pic_ref
= gen_rtx_PLUS (Pmode
, base
, orig
);
2711 /* The __tls_get_addr symbol. */
2712 static GTY(()) rtx m68k_tls_get_addr
;
2714 /* Return SYMBOL_REF for __tls_get_addr. */
2717 m68k_get_tls_get_addr (void)
2719 if (m68k_tls_get_addr
== NULL_RTX
)
2720 m68k_tls_get_addr
= init_one_libfunc ("__tls_get_addr");
2722 return m68k_tls_get_addr
;
2725 /* Return libcall result in A0 instead of usual D0. */
2726 static bool m68k_libcall_value_in_a0_p
= false;
2728 /* Emit instruction sequence that calls __tls_get_addr. X is
2729 the TLS symbol we are referencing and RELOC is the symbol type to use
2730 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2731 emitted. A pseudo register with result of __tls_get_addr call is
2735 m68k_call_tls_get_addr (rtx x
, rtx eqv
, enum m68k_reloc reloc
)
2741 /* Emit the call sequence. */
2744 /* FIXME: Unfortunately, emit_library_call_value does not
2745 consider (plus (%a5) (const (unspec))) to be a good enough
2746 operand for push, so it forces it into a register. The bad
2747 thing about this is that combiner, due to copy propagation and other
2748 optimizations, sometimes cannot later fix this. As a consequence,
2749 additional register may be allocated resulting in a spill.
2750 For reference, see args processing loops in
2751 calls.cc:emit_library_call_value_1.
2752 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2753 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), NULL_RTX
);
2755 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2756 is the simpliest way of generating a call. The difference between
2757 __tls_get_addr() and libcall is that the result is returned in D0
2758 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2759 which temporarily switches returning the result to A0. */
2761 m68k_libcall_value_in_a0_p
= true;
2762 a0
= emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX
, LCT_PURE
,
2764 m68k_libcall_value_in_a0_p
= false;
2766 insns
= get_insns ();
2769 gcc_assert (can_create_pseudo_p ());
2770 dest
= gen_reg_rtx (Pmode
);
2771 emit_libcall_block (insns
, dest
, a0
, eqv
);
2776 /* The __tls_get_addr symbol. */
2777 static GTY(()) rtx m68k_read_tp
;
2779 /* Return SYMBOL_REF for __m68k_read_tp. */
2782 m68k_get_m68k_read_tp (void)
2784 if (m68k_read_tp
== NULL_RTX
)
2785 m68k_read_tp
= init_one_libfunc ("__m68k_read_tp");
2787 return m68k_read_tp
;
2790 /* Emit instruction sequence that calls __m68k_read_tp.
2791 A pseudo register with result of __m68k_read_tp call is returned. */
2794 m68k_call_m68k_read_tp (void)
2803 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2804 is the simpliest way of generating a call. The difference between
2805 __m68k_read_tp() and libcall is that the result is returned in D0
2806 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2807 which temporarily switches returning the result to A0. */
2809 /* Emit the call sequence. */
2810 m68k_libcall_value_in_a0_p
= true;
2811 a0
= emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX
, LCT_PURE
,
2813 m68k_libcall_value_in_a0_p
= false;
2814 insns
= get_insns ();
2817 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2818 share the m68k_read_tp result with other IE/LE model accesses. */
2819 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const1_rtx
), UNSPEC_RELOC32
);
2821 gcc_assert (can_create_pseudo_p ());
2822 dest
= gen_reg_rtx (Pmode
);
2823 emit_libcall_block (insns
, dest
, a0
, eqv
);
2828 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2829 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2833 m68k_legitimize_tls_address (rtx orig
)
2835 switch (SYMBOL_REF_TLS_MODEL (orig
))
2837 case TLS_MODEL_GLOBAL_DYNAMIC
:
2838 orig
= m68k_call_tls_get_addr (orig
, orig
, RELOC_TLSGD
);
2841 case TLS_MODEL_LOCAL_DYNAMIC
:
2847 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2848 share the LDM result with other LD model accesses. */
2849 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2852 a0
= m68k_call_tls_get_addr (orig
, eqv
, RELOC_TLSLDM
);
2854 x
= m68k_wrap_symbol (orig
, RELOC_TLSLDO
, a0
, NULL_RTX
);
2856 if (can_create_pseudo_p ())
2857 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2863 case TLS_MODEL_INITIAL_EXEC
:
2868 a0
= m68k_call_m68k_read_tp ();
2870 x
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_TLSIE
, NULL_RTX
);
2871 x
= gen_rtx_PLUS (Pmode
, x
, a0
);
2873 if (can_create_pseudo_p ())
2874 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2880 case TLS_MODEL_LOCAL_EXEC
:
2885 a0
= m68k_call_m68k_read_tp ();
2887 x
= m68k_wrap_symbol (orig
, RELOC_TLSLE
, a0
, NULL_RTX
);
2889 if (can_create_pseudo_p ())
2890 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2903 /* Return true if X is a TLS symbol. */
2906 m68k_tls_symbol_p (rtx x
)
2908 if (!TARGET_HAVE_TLS
)
2911 if (GET_CODE (x
) != SYMBOL_REF
)
2914 return SYMBOL_REF_TLS_MODEL (x
) != 0;
2917 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2918 though illegitimate one.
2919 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2922 m68k_tls_reference_p (rtx x
, bool legitimate_p
)
2924 if (!TARGET_HAVE_TLS
)
2929 subrtx_var_iterator::array_type array
;
2930 FOR_EACH_SUBRTX_VAR (iter
, array
, x
, ALL
)
2934 /* Note: this is not the same as m68k_tls_symbol_p. */
2935 if (GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
) != 0)
2938 /* Don't recurse into legitimate TLS references. */
2939 if (m68k_tls_reference_p (x
, true))
2940 iter
.skip_subrtxes ();
2946 enum m68k_reloc reloc
= RELOC_GOT
;
2948 return (m68k_unwrap_symbol_1 (x
, true, &reloc
) != x
2949 && TLS_RELOC_P (reloc
));
2955 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2957 /* Return the type of move that should be used for integer I. */
2960 m68k_const_method (HOST_WIDE_INT i
)
2967 /* The ColdFire doesn't have byte or word operations. */
2968 /* FIXME: This may not be useful for the m68060 either. */
2969 if (!TARGET_COLDFIRE
)
2971 /* if -256 < N < 256 but N is not in range for a moveq
2972 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2973 if (USE_MOVQ (i
^ 0xff))
2975 /* Likewise, try with not.w */
2976 if (USE_MOVQ (i
^ 0xffff))
2978 /* This is the only value where neg.w is useful */
2983 /* Try also with swap. */
2985 if (USE_MOVQ ((u
>> 16) | (u
<< 16)))
2990 /* Try using MVZ/MVS with an immediate value to load constants. */
2991 if (i
>= 0 && i
<= 65535)
2993 if (i
>= -32768 && i
<= 32767)
2997 /* Otherwise, use move.l */
3001 /* Return the cost of moving constant I into a data register. */
3004 const_int_cost (HOST_WIDE_INT i
)
3006 switch (m68k_const_method (i
))
3009 /* Constants between -128 and 127 are cheap due to moveq. */
3017 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
3027 m68k_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
3028 int opno ATTRIBUTE_UNUSED
,
3029 int *total
, bool speed ATTRIBUTE_UNUSED
)
3031 int code
= GET_CODE (x
);
3036 /* Constant zero is super cheap due to clr instruction. */
3037 if (x
== const0_rtx
)
3040 *total
= const_int_cost (INTVAL (x
));
3050 /* Make 0.0 cheaper than other floating constants to
3051 encourage creating tstsf and tstdf insns. */
3052 if ((GET_RTX_CLASS (outer_code
) == RTX_COMPARE
3053 || GET_RTX_CLASS (outer_code
) == RTX_COMM_COMPARE
)
3054 && (x
== CONST0_RTX (SFmode
) || x
== CONST0_RTX (DFmode
)))
3060 /* These are vaguely right for a 68020. */
3061 /* The costs for long multiply have been adjusted to work properly
3062 in synth_mult on the 68020, relative to an average of the time
3063 for add and the time for shift, taking away a little more because
3064 sometimes move insns are needed. */
3065 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
3070 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
3071 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
3073 : TARGET_COLDFIRE ? 3 : 13)
3078 : TUNE_68000_10 ? 5 \
3079 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
3080 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
3082 : TARGET_COLDFIRE ? 2 : 8)
3085 (TARGET_CF_HWDIV ? 11 \
3086 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
3089 /* An lea costs about three times as much as a simple add. */
3091 && GET_CODE (XEXP (x
, 1)) == REG
3092 && ((GET_CODE (XEXP (x
, 0)) == MULT
3093 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
3094 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
3095 && (INTVAL (XEXP (XEXP (x
, 0), 1)) == 2
3096 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 4
3097 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 8))
3098 || (GET_CODE (XEXP (x
, 0)) == ASHIFT
3099 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
3100 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
3101 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (XEXP (x
, 0), 1))
3104 /* lea an@(dx:l:i),am */
3105 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 2 : 3);
3115 *total
= COSTS_N_INSNS(1);
3120 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
3122 if (INTVAL (XEXP (x
, 1)) < 16)
3123 *total
= COSTS_N_INSNS (2) + INTVAL (XEXP (x
, 1)) / 2;
3125 /* We're using clrw + swap for these cases. */
3126 *total
= COSTS_N_INSNS (4) + (INTVAL (XEXP (x
, 1)) - 16) / 2;
3129 *total
= COSTS_N_INSNS (10); /* Worst case. */
3132 /* A shift by a big integer takes an extra instruction. */
3133 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
3134 && (INTVAL (XEXP (x
, 1)) == 16))
3136 *total
= COSTS_N_INSNS (2); /* clrw;swap */
3139 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
3140 && !(INTVAL (XEXP (x
, 1)) > 0
3141 && INTVAL (XEXP (x
, 1)) <= 8))
3143 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 1 : 3); /* lsr #i,dn */
3149 if ((GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
3150 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
3152 *total
= COSTS_N_INSNS (MULW_COST
);
3153 else if (mode
== QImode
|| mode
== HImode
)
3154 *total
= COSTS_N_INSNS (MULW_COST
);
3156 *total
= COSTS_N_INSNS (MULL_COST
);
3163 if (mode
== QImode
|| mode
== HImode
)
3164 *total
= COSTS_N_INSNS (DIVW_COST
); /* div.w */
3165 else if (TARGET_CF_HWDIV
)
3166 *total
= COSTS_N_INSNS (18);
3168 *total
= COSTS_N_INSNS (43); /* div.l */
3172 if (GET_RTX_CLASS (outer_code
) == RTX_COMPARE
3173 || GET_RTX_CLASS (outer_code
) == RTX_COMM_COMPARE
)
3182 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
3186 output_move_const_into_data_reg (rtx
*operands
)
3190 i
= INTVAL (operands
[1]);
3191 switch (m68k_const_method (i
))
3194 return "mvzw %1,%0";
3196 return "mvsw %1,%0";
3198 return "moveq %1,%0";
3201 operands
[1] = GEN_INT (i
^ 0xff);
3202 return "moveq %1,%0\n\tnot%.b %0";
3205 operands
[1] = GEN_INT (i
^ 0xffff);
3206 return "moveq %1,%0\n\tnot%.w %0";
3209 return "moveq #-128,%0\n\tneg%.w %0";
3214 operands
[1] = GEN_INT ((u
<< 16) | (u
>> 16));
3215 return "moveq %1,%0\n\tswap %0";
3218 return "move%.l %1,%0";
3224 /* Return true if I can be handled by ISA B's mov3q instruction. */
3227 valid_mov3q_const (HOST_WIDE_INT i
)
3229 return TARGET_ISAB
&& (i
== -1 || IN_RANGE (i
, 1, 7));
3232 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
3233 I is the value of OPERANDS[1]. */
3236 output_move_simode_const (rtx
*operands
)
3242 src
= INTVAL (operands
[1]);
3244 && (DATA_REG_P (dest
) || MEM_P (dest
))
3245 /* clr insns on 68000 read before writing. */
3246 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3247 || !(MEM_P (dest
) && MEM_VOLATILE_P (dest
))))
3249 else if (GET_MODE (dest
) == SImode
&& valid_mov3q_const (src
))
3250 return "mov3q%.l %1,%0";
3251 else if (src
== 0 && ADDRESS_REG_P (dest
))
3252 return "sub%.l %0,%0";
3253 else if (DATA_REG_P (dest
))
3254 return output_move_const_into_data_reg (operands
);
3255 else if (ADDRESS_REG_P (dest
) && IN_RANGE (src
, -0x8000, 0x7fff))
3257 if (valid_mov3q_const (src
))
3258 return "mov3q%.l %1,%0";
3259 return "move%.w %1,%0";
3261 else if (MEM_P (dest
)
3262 && GET_CODE (XEXP (dest
, 0)) == PRE_DEC
3263 && REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
3264 && IN_RANGE (src
, -0x8000, 0x7fff))
3266 if (valid_mov3q_const (src
))
3267 return "mov3q%.l %1,%-";
3270 return "move%.l %1,%0";
3274 output_move_simode (rtx
*operands
)
3276 handle_flags_for_move (operands
);
3278 if (GET_CODE (operands
[1]) == CONST_INT
)
3279 return output_move_simode_const (operands
);
3280 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3281 || GET_CODE (operands
[1]) == CONST
)
3282 && push_operand (operands
[0], SImode
))
3284 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3285 || GET_CODE (operands
[1]) == CONST
)
3286 && ADDRESS_REG_P (operands
[0]))
3287 return "lea %a1,%0";
3288 return "move%.l %1,%0";
3292 output_move_himode (rtx
*operands
)
3294 if (GET_CODE (operands
[1]) == CONST_INT
)
3296 if (operands
[1] == const0_rtx
3297 && (DATA_REG_P (operands
[0])
3298 || GET_CODE (operands
[0]) == MEM
)
3299 /* clr insns on 68000 read before writing. */
3300 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3301 || !(GET_CODE (operands
[0]) == MEM
3302 && MEM_VOLATILE_P (operands
[0]))))
3304 else if (operands
[1] == const0_rtx
3305 && ADDRESS_REG_P (operands
[0]))
3306 return "sub%.l %0,%0";
3307 else if (DATA_REG_P (operands
[0])
3308 && INTVAL (operands
[1]) < 128
3309 && INTVAL (operands
[1]) >= -128)
3310 return "moveq %1,%0";
3311 else if (INTVAL (operands
[1]) < 0x8000
3312 && INTVAL (operands
[1]) >= -0x8000)
3313 return "move%.w %1,%0";
3315 else if (CONSTANT_P (operands
[1]))
3317 return "move%.w %1,%0";
3321 output_move_qimode (rtx
*operands
)
3323 handle_flags_for_move (operands
);
3325 /* 68k family always modifies the stack pointer by at least 2, even for
3326 byte pushes. The 5200 (ColdFire) does not do this. */
3328 /* This case is generated by pushqi1 pattern now. */
3329 gcc_assert (!(GET_CODE (operands
[0]) == MEM
3330 && GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
3331 && XEXP (XEXP (operands
[0], 0), 0) == stack_pointer_rtx
3332 && ! ADDRESS_REG_P (operands
[1])
3333 && ! TARGET_COLDFIRE
));
3335 /* clr and st insns on 68000 read before writing. */
3336 if (!ADDRESS_REG_P (operands
[0])
3337 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3338 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3340 if (operands
[1] == const0_rtx
)
3342 if ((!TARGET_COLDFIRE
|| DATA_REG_P (operands
[0]))
3343 && GET_CODE (operands
[1]) == CONST_INT
3344 && (INTVAL (operands
[1]) & 255) == 255)
3350 if (GET_CODE (operands
[1]) == CONST_INT
3351 && DATA_REG_P (operands
[0])
3352 && INTVAL (operands
[1]) < 128
3353 && INTVAL (operands
[1]) >= -128)
3354 return "moveq %1,%0";
3355 if (operands
[1] == const0_rtx
&& ADDRESS_REG_P (operands
[0]))
3356 return "sub%.l %0,%0";
3357 if (GET_CODE (operands
[1]) != CONST_INT
&& CONSTANT_P (operands
[1]))
3359 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3360 from address registers. */
3361 if (ADDRESS_REG_P (operands
[0]) || ADDRESS_REG_P (operands
[1]))
3363 if (ADDRESS_REG_P (operands
[1]))
3365 return "move%.w %1,%0";
3367 return "move%.b %1,%0";
3371 output_move_stricthi (rtx
*operands
)
3373 if (operands
[1] == const0_rtx
3374 /* clr insns on 68000 read before writing. */
3375 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3376 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3378 return "move%.w %1,%0";
3382 output_move_strictqi (rtx
*operands
)
3384 if (operands
[1] == const0_rtx
3385 /* clr insns on 68000 read before writing. */
3386 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3387 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3389 return "move%.b %1,%0";
3392 /* Return the best assembler insn template
3393 for moving operands[1] into operands[0] as a fullword. */
3396 singlemove_string (rtx
*operands
)
3398 if (GET_CODE (operands
[1]) == CONST_INT
)
3399 return output_move_simode_const (operands
);
3400 return "move%.l %1,%0";
3404 /* Output assembler or rtl code to perform a doubleword move insn
3405 with operands OPERANDS.
3406 Pointers to 3 helper functions should be specified:
3407 HANDLE_REG_ADJUST to adjust a register by a small value,
3408 HANDLE_COMPADR to compute an address and
3409 HANDLE_MOVSI to move 4 bytes. */
3412 handle_move_double (rtx operands
[2],
3413 void (*handle_reg_adjust
) (rtx
, int),
3414 void (*handle_compadr
) (rtx
[2]),
3415 void (*handle_movsi
) (rtx
[2]))
3419 REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
3424 rtx addreg0
= 0, addreg1
= 0;
3425 int dest_overlapped_low
= 0;
3426 int size
= GET_MODE_SIZE (GET_MODE (operands
[0]));
3431 /* First classify both operands. */
3433 if (REG_P (operands
[0]))
3435 else if (offsettable_memref_p (operands
[0]))
3437 else if (GET_CODE (XEXP (operands
[0], 0)) == POST_INC
)
3439 else if (GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
)
3441 else if (GET_CODE (operands
[0]) == MEM
)
3446 if (REG_P (operands
[1]))
3448 else if (CONSTANT_P (operands
[1]))
3450 else if (offsettable_memref_p (operands
[1]))
3452 else if (GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3454 else if (GET_CODE (XEXP (operands
[1], 0)) == PRE_DEC
)
3456 else if (GET_CODE (operands
[1]) == MEM
)
3461 /* Check for the cases that the operand constraints are not supposed
3462 to allow to happen. Generating code for these cases is
3464 gcc_assert (optype0
!= RNDOP
&& optype1
!= RNDOP
);
3466 /* If one operand is decrementing and one is incrementing
3467 decrement the former register explicitly
3468 and change that operand into ordinary indexing. */
3470 if (optype0
== PUSHOP
&& optype1
== POPOP
)
3472 operands
[0] = XEXP (XEXP (operands
[0], 0), 0);
3474 handle_reg_adjust (operands
[0], -size
);
3476 if (GET_MODE (operands
[1]) == XFmode
)
3477 operands
[0] = gen_rtx_MEM (XFmode
, operands
[0]);
3478 else if (GET_MODE (operands
[0]) == DFmode
)
3479 operands
[0] = gen_rtx_MEM (DFmode
, operands
[0]);
3481 operands
[0] = gen_rtx_MEM (DImode
, operands
[0]);
3484 if (optype0
== POPOP
&& optype1
== PUSHOP
)
3486 operands
[1] = XEXP (XEXP (operands
[1], 0), 0);
3488 handle_reg_adjust (operands
[1], -size
);
3490 if (GET_MODE (operands
[1]) == XFmode
)
3491 operands
[1] = gen_rtx_MEM (XFmode
, operands
[1]);
3492 else if (GET_MODE (operands
[1]) == DFmode
)
3493 operands
[1] = gen_rtx_MEM (DFmode
, operands
[1]);
3495 operands
[1] = gen_rtx_MEM (DImode
, operands
[1]);
3499 /* If an operand is an unoffsettable memory ref, find a register
3500 we can increment temporarily to make it refer to the second word. */
3502 if (optype0
== MEMOP
)
3503 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
3505 if (optype1
== MEMOP
)
3506 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
3508 /* Ok, we can do one word at a time.
3509 Normally we do the low-numbered word first,
3510 but if either operand is autodecrementing then we
3511 do the high-numbered word first.
3513 In either case, set up in LATEHALF the operands to use
3514 for the high-numbered word and in some cases alter the
3515 operands in OPERANDS to be suitable for the low-numbered word. */
3519 if (optype0
== REGOP
)
3521 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 2);
3522 middlehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3524 else if (optype0
== OFFSOP
)
3526 middlehalf
[0] = adjust_address (operands
[0], SImode
, 4);
3527 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3531 middlehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3532 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3535 if (optype1
== REGOP
)
3537 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 2);
3538 middlehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3540 else if (optype1
== OFFSOP
)
3542 middlehalf
[1] = adjust_address (operands
[1], SImode
, 4);
3543 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3545 else if (optype1
== CNSTOP
)
3547 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
3551 REAL_VALUE_TO_TARGET_LONG_DOUBLE
3552 (*CONST_DOUBLE_REAL_VALUE (operands
[1]), l
);
3553 operands
[1] = GEN_INT (l
[0]);
3554 middlehalf
[1] = GEN_INT (l
[1]);
3555 latehalf
[1] = GEN_INT (l
[2]);
3559 /* No non-CONST_DOUBLE constant should ever appear
3561 gcc_assert (!CONSTANT_P (operands
[1]));
3566 middlehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3567 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3571 /* size is not 12: */
3573 if (optype0
== REGOP
)
3574 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3575 else if (optype0
== OFFSOP
)
3576 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3578 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3580 if (optype1
== REGOP
)
3581 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3582 else if (optype1
== OFFSOP
)
3583 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3584 else if (optype1
== CNSTOP
)
3585 split_double (operands
[1], &operands
[1], &latehalf
[1]);
3587 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3590 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3591 word first. We should use the adjusted operand 1 (which is N+4(REG))
3592 for the low word as well, to compensate for the first decrement of
3594 if (optype0
== PUSHOP
3595 && reg_overlap_mentioned_p (XEXP (XEXP (operands
[0], 0), 0), operands
[1]))
3596 operands
[1] = middlehalf
[1] = latehalf
[1];
3598 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3599 if the upper part of reg N does not appear in the MEM, arrange to
3600 emit the move late-half first. Otherwise, compute the MEM address
3601 into the upper part of N and use that as a pointer to the memory
3603 if (optype0
== REGOP
3604 && (optype1
== OFFSOP
|| optype1
== MEMOP
))
3606 rtx testlow
= gen_rtx_REG (SImode
, REGNO (operands
[0]));
3608 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3609 && reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3611 /* If both halves of dest are used in the src memory address,
3612 compute the address into latehalf of dest.
3613 Note that this can't happen if the dest is two data regs. */
3615 xops
[0] = latehalf
[0];
3616 xops
[1] = XEXP (operands
[1], 0);
3618 handle_compadr (xops
);
3619 if (GET_MODE (operands
[1]) == XFmode
)
3621 operands
[1] = gen_rtx_MEM (XFmode
, latehalf
[0]);
3622 middlehalf
[1] = adjust_address (operands
[1], DImode
, size
- 8);
3623 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3627 operands
[1] = gen_rtx_MEM (DImode
, latehalf
[0]);
3628 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3632 && reg_overlap_mentioned_p (middlehalf
[0],
3633 XEXP (operands
[1], 0)))
3635 /* Check for two regs used by both source and dest.
3636 Note that this can't happen if the dest is all data regs.
3637 It can happen if the dest is d6, d7, a0.
3638 But in that case, latehalf is an addr reg, so
3639 the code at compadr does ok. */
3641 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3642 || reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3645 /* JRV says this can't happen: */
3646 gcc_assert (!addreg0
&& !addreg1
);
3648 /* Only the middle reg conflicts; simply put it last. */
3649 handle_movsi (operands
);
3650 handle_movsi (latehalf
);
3651 handle_movsi (middlehalf
);
3655 else if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0)))
3656 /* If the low half of dest is mentioned in the source memory
3657 address, the arrange to emit the move late half first. */
3658 dest_overlapped_low
= 1;
3661 /* If one or both operands autodecrementing,
3662 do the two words, high-numbered first. */
3664 /* Likewise, the first move would clobber the source of the second one,
3665 do them in the other order. This happens only for registers;
3666 such overlap can't happen in memory unless the user explicitly
3667 sets it up, and that is an undefined circumstance. */
3669 if (optype0
== PUSHOP
|| optype1
== PUSHOP
3670 || (optype0
== REGOP
&& optype1
== REGOP
3671 && ((middlehalf
[1] && REGNO (operands
[0]) == REGNO (middlehalf
[1]))
3672 || REGNO (operands
[0]) == REGNO (latehalf
[1])))
3673 || dest_overlapped_low
)
3675 /* Make any unoffsettable addresses point at high-numbered word. */
3677 handle_reg_adjust (addreg0
, size
- 4);
3679 handle_reg_adjust (addreg1
, size
- 4);
3682 handle_movsi (latehalf
);
3684 /* Undo the adds we just did. */
3686 handle_reg_adjust (addreg0
, -4);
3688 handle_reg_adjust (addreg1
, -4);
3692 handle_movsi (middlehalf
);
3695 handle_reg_adjust (addreg0
, -4);
3697 handle_reg_adjust (addreg1
, -4);
3700 /* Do low-numbered word. */
3702 handle_movsi (operands
);
3706 /* Normal case: do the two words, low-numbered first. */
3708 m68k_final_prescan_insn (NULL
, operands
, 2);
3709 handle_movsi (operands
);
3711 /* Do the middle one of the three words for long double */
3715 handle_reg_adjust (addreg0
, 4);
3717 handle_reg_adjust (addreg1
, 4);
3719 m68k_final_prescan_insn (NULL
, middlehalf
, 2);
3720 handle_movsi (middlehalf
);
3723 /* Make any unoffsettable addresses point at high-numbered word. */
3725 handle_reg_adjust (addreg0
, 4);
3727 handle_reg_adjust (addreg1
, 4);
3730 m68k_final_prescan_insn (NULL
, latehalf
, 2);
3731 handle_movsi (latehalf
);
3733 /* Undo the adds we just did. */
3735 handle_reg_adjust (addreg0
, -(size
- 4));
3737 handle_reg_adjust (addreg1
, -(size
- 4));
3742 /* Output assembler code to adjust REG by N. */
3744 output_reg_adjust (rtx reg
, int n
)
3748 gcc_assert (GET_MODE (reg
) == SImode
&& n
>= -12 && n
!= 0 && n
<= 12);
3753 s
= "add%.l #12,%0";
3757 s
= "addq%.l #8,%0";
3761 s
= "addq%.l #4,%0";
3765 s
= "sub%.l #12,%0";
3769 s
= "subq%.l #8,%0";
3773 s
= "subq%.l #4,%0";
3781 output_asm_insn (s
, ®
);
3784 /* Emit rtl code to adjust REG by N. */
3786 emit_reg_adjust (rtx reg1
, int n
)
3790 gcc_assert (GET_MODE (reg1
) == SImode
&& n
>= -12 && n
!= 0 && n
<= 12);
3792 reg1
= copy_rtx (reg1
);
3793 reg2
= copy_rtx (reg1
);
3796 emit_insn (gen_subsi3 (reg1
, reg2
, GEN_INT (-n
)));
3798 emit_insn (gen_addsi3 (reg1
, reg2
, GEN_INT (n
)));
3803 /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3805 output_compadr (rtx operands
[2])
3807 output_asm_insn ("lea %a1,%0", operands
);
3810 /* Output the best assembler insn for moving operands[1] into operands[0]
3813 output_movsi (rtx operands
[2])
3815 output_asm_insn (singlemove_string (operands
), operands
);
3818 /* Copy OP and change its mode to MODE. */
3820 copy_operand (rtx op
, machine_mode mode
)
3822 /* ??? This looks really ugly. There must be a better way
3823 to change a mode on the operand. */
3824 if (GET_MODE (op
) != VOIDmode
)
3827 op
= gen_rtx_REG (mode
, REGNO (op
));
3831 PUT_MODE (op
, mode
);
3838 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3840 emit_movsi (rtx operands
[2])
3842 operands
[0] = copy_operand (operands
[0], SImode
);
3843 operands
[1] = copy_operand (operands
[1], SImode
);
3845 emit_insn (gen_movsi (operands
[0], operands
[1]));
3848 /* Output assembler code to perform a doubleword move insn
3849 with operands OPERANDS. */
3851 output_move_double (rtx
*operands
)
3853 handle_move_double (operands
,
3854 output_reg_adjust
, output_compadr
, output_movsi
);
3859 /* Output rtl code to perform a doubleword move insn
3860 with operands OPERANDS. */
3862 m68k_emit_move_double (rtx operands
[2])
3864 handle_move_double (operands
, emit_reg_adjust
, emit_movsi
, emit_movsi
);
3867 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3868 new rtx with the correct mode. */
3871 force_mode (machine_mode mode
, rtx orig
)
3873 if (mode
== GET_MODE (orig
))
3876 if (REGNO (orig
) >= FIRST_PSEUDO_REGISTER
)
3879 return gen_rtx_REG (mode
, REGNO (orig
));
3883 fp_reg_operand (rtx op
, machine_mode mode ATTRIBUTE_UNUSED
)
3885 return reg_renumber
&& FP_REG_P (op
);
3888 /* Emit insns to move operands[1] into operands[0].
3890 Return 1 if we have written out everything that needs to be done to
3891 do the move. Otherwise, return 0 and the caller will emit the move
3894 Note SCRATCH_REG may not be in the proper mode depending on how it
3895 will be used. This routine is responsible for creating a new copy
3896 of SCRATCH_REG in the proper mode. */
3899 emit_move_sequence (rtx
*operands
, machine_mode mode
, rtx scratch_reg
)
3901 rtx operand0
= operands
[0];
3902 rtx operand1
= operands
[1];
3906 && (reload_in_progress
|| lra_in_progress
)
3907 && GET_CODE (operand0
) == REG
3908 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
3909 operand0
= reg_equiv_mem (REGNO (operand0
));
3910 else if (scratch_reg
3911 && (reload_in_progress
|| lra_in_progress
)
3912 && GET_CODE (operand0
) == SUBREG
3913 && GET_CODE (SUBREG_REG (operand0
)) == REG
3914 && REGNO (SUBREG_REG (operand0
)) >= FIRST_PSEUDO_REGISTER
)
3916 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3917 the code which tracks sets/uses for delete_output_reload. */
3918 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand0
),
3919 reg_equiv_mem (REGNO (SUBREG_REG (operand0
))),
3920 SUBREG_BYTE (operand0
));
3921 operand0
= alter_subreg (&temp
, true);
3925 && (reload_in_progress
|| lra_in_progress
)
3926 && GET_CODE (operand1
) == REG
3927 && REGNO (operand1
) >= FIRST_PSEUDO_REGISTER
)
3928 operand1
= reg_equiv_mem (REGNO (operand1
));
3929 else if (scratch_reg
3930 && (reload_in_progress
|| lra_in_progress
)
3931 && GET_CODE (operand1
) == SUBREG
3932 && GET_CODE (SUBREG_REG (operand1
)) == REG
3933 && REGNO (SUBREG_REG (operand1
)) >= FIRST_PSEUDO_REGISTER
)
3935 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3936 the code which tracks sets/uses for delete_output_reload. */
3937 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand1
),
3938 reg_equiv_mem (REGNO (SUBREG_REG (operand1
))),
3939 SUBREG_BYTE (operand1
));
3940 operand1
= alter_subreg (&temp
, true);
3943 if (scratch_reg
&& (reload_in_progress
|| lra_in_progress
)
3944 && GET_CODE (operand0
) == MEM
3945 && ((tem
= find_replacement (&XEXP (operand0
, 0)))
3946 != XEXP (operand0
, 0)))
3947 operand0
= gen_rtx_MEM (GET_MODE (operand0
), tem
);
3948 if (scratch_reg
&& (reload_in_progress
|| lra_in_progress
)
3949 && GET_CODE (operand1
) == MEM
3950 && ((tem
= find_replacement (&XEXP (operand1
, 0)))
3951 != XEXP (operand1
, 0)))
3952 operand1
= gen_rtx_MEM (GET_MODE (operand1
), tem
);
3954 /* Handle secondary reloads for loads/stores of FP registers where
3955 the address is symbolic by using the scratch register */
3956 if (fp_reg_operand (operand0
, mode
)
3957 && ((GET_CODE (operand1
) == MEM
3958 && ! memory_address_p (DFmode
, XEXP (operand1
, 0)))
3959 || ((GET_CODE (operand1
) == SUBREG
3960 && GET_CODE (XEXP (operand1
, 0)) == MEM
3961 && !memory_address_p (DFmode
, XEXP (XEXP (operand1
, 0), 0)))))
3964 if (GET_CODE (operand1
) == SUBREG
)
3965 operand1
= XEXP (operand1
, 0);
3967 /* SCRATCH_REG will hold an address. We want
3968 it in SImode regardless of what mode it was originally given
3970 scratch_reg
= force_mode (SImode
, scratch_reg
);
3972 /* D might not fit in 14 bits either; for such cases load D into
3974 if (!memory_address_p (Pmode
, XEXP (operand1
, 0)))
3976 emit_move_insn (scratch_reg
, XEXP (XEXP (operand1
, 0), 1));
3977 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1
, 0)),
3979 XEXP (XEXP (operand1
, 0), 0),
3983 emit_move_insn (scratch_reg
, XEXP (operand1
, 0));
3984 emit_insn (gen_rtx_SET (operand0
, gen_rtx_MEM (mode
, scratch_reg
)));
3987 else if (fp_reg_operand (operand1
, mode
)
3988 && ((GET_CODE (operand0
) == MEM
3989 && ! memory_address_p (DFmode
, XEXP (operand0
, 0)))
3990 || ((GET_CODE (operand0
) == SUBREG
)
3991 && GET_CODE (XEXP (operand0
, 0)) == MEM
3992 && !memory_address_p (DFmode
, XEXP (XEXP (operand0
, 0), 0))))
3995 if (GET_CODE (operand0
) == SUBREG
)
3996 operand0
= XEXP (operand0
, 0);
3998 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3999 it in SIMODE regardless of what mode it was originally given
4001 scratch_reg
= force_mode (SImode
, scratch_reg
);
4003 /* D might not fit in 14 bits either; for such cases load D into
4005 if (!memory_address_p (Pmode
, XEXP (operand0
, 0)))
4007 emit_move_insn (scratch_reg
, XEXP (XEXP (operand0
, 0), 1));
4008 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0
,
4011 XEXP (XEXP (operand0
, 0),
4016 emit_move_insn (scratch_reg
, XEXP (operand0
, 0));
4017 emit_insn (gen_rtx_SET (gen_rtx_MEM (mode
, scratch_reg
), operand1
));
4020 /* Handle secondary reloads for loads of FP registers from constant
4021 expressions by forcing the constant into memory.
4023 use scratch_reg to hold the address of the memory location.
4025 The proper fix is to change PREFERRED_RELOAD_CLASS to return
4026 NO_REGS when presented with a const_int and an register class
4027 containing only FP registers. Doing so unfortunately creates
4028 more problems than it solves. Fix this for 2.5. */
4029 else if (fp_reg_operand (operand0
, mode
)
4030 && CONSTANT_P (operand1
)
4035 /* SCRATCH_REG will hold an address and maybe the actual data. We want
4036 it in SIMODE regardless of what mode it was originally given
4038 scratch_reg
= force_mode (SImode
, scratch_reg
);
4040 /* Force the constant into memory and put the address of the
4041 memory location into scratch_reg. */
4042 xoperands
[0] = scratch_reg
;
4043 xoperands
[1] = XEXP (force_const_mem (mode
, operand1
), 0);
4044 emit_insn (gen_rtx_SET (scratch_reg
, xoperands
[1]));
4046 /* Now load the destination register. */
4047 emit_insn (gen_rtx_SET (operand0
, gen_rtx_MEM (mode
, scratch_reg
)));
4051 /* Now have insn-emit do whatever it normally does. */
4055 /* Split one or more DImode RTL references into pairs of SImode
4056 references. The RTL can be REG, offsettable MEM, integer constant, or
4057 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
4058 split and "num" is its length. lo_half and hi_half are output arrays
4059 that parallel "operands". */
4062 split_di (rtx operands
[], int num
, rtx lo_half
[], rtx hi_half
[])
4066 rtx op
= operands
[num
];
4068 /* simplify_subreg refuses to split volatile memory addresses,
4069 but we still have to handle it. */
4070 if (GET_CODE (op
) == MEM
)
4072 lo_half
[num
] = adjust_address (op
, SImode
, 4);
4073 hi_half
[num
] = adjust_address (op
, SImode
, 0);
4077 lo_half
[num
] = simplify_gen_subreg (SImode
, op
,
4078 GET_MODE (op
) == VOIDmode
4079 ? DImode
: GET_MODE (op
), 4);
4080 hi_half
[num
] = simplify_gen_subreg (SImode
, op
,
4081 GET_MODE (op
) == VOIDmode
4082 ? DImode
: GET_MODE (op
), 0);
4087 /* Split X into a base and a constant offset, storing them in *BASE
4088 and *OFFSET respectively. */
4091 m68k_split_offset (rtx x
, rtx
*base
, HOST_WIDE_INT
*offset
)
4094 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4096 *offset
+= INTVAL (XEXP (x
, 1));
4102 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
4103 instruction. STORE_P says whether the move is a load or store.
4105 If the instruction uses post-increment or pre-decrement addressing,
4106 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
4107 adjustment. This adjustment will be made by the first element of
4108 PARALLEL, with the loads or stores starting at element 1. If the
4109 instruction does not use post-increment or pre-decrement addressing,
4110 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
4111 start at element 0. */
4114 m68k_movem_pattern_p (rtx pattern
, rtx automod_base
,
4115 HOST_WIDE_INT automod_offset
, bool store_p
)
4117 rtx base
, mem_base
, set
, mem
, reg
, last_reg
;
4118 HOST_WIDE_INT offset
, mem_offset
;
4120 enum reg_class rclass
;
4122 len
= XVECLEN (pattern
, 0);
4123 first
= (automod_base
!= NULL
);
4127 /* Stores must be pre-decrement and loads must be post-increment. */
4128 if (store_p
!= (automod_offset
< 0))
4131 /* Work out the base and offset for lowest memory location. */
4132 base
= automod_base
;
4133 offset
= (automod_offset
< 0 ? automod_offset
: 0);
4137 /* Allow any valid base and offset in the first access. */
4144 for (i
= first
; i
< len
; i
++)
4146 /* We need a plain SET. */
4147 set
= XVECEXP (pattern
, 0, i
);
4148 if (GET_CODE (set
) != SET
)
4151 /* Check that we have a memory location... */
4152 mem
= XEXP (set
, !store_p
);
4153 if (!MEM_P (mem
) || !memory_operand (mem
, VOIDmode
))
4156 /* ...with the right address. */
4159 m68k_split_offset (XEXP (mem
, 0), &base
, &offset
);
4160 /* The ColdFire instruction only allows (An) and (d16,An) modes.
4161 There are no mode restrictions for 680x0 besides the
4162 automodification rules enforced above. */
4164 && !m68k_legitimate_base_reg_p (base
, reload_completed
))
4169 m68k_split_offset (XEXP (mem
, 0), &mem_base
, &mem_offset
);
4170 if (!rtx_equal_p (base
, mem_base
) || offset
!= mem_offset
)
4174 /* Check that we have a register of the required mode and class. */
4175 reg
= XEXP (set
, store_p
);
4177 || !HARD_REGISTER_P (reg
)
4178 || GET_MODE (reg
) != reg_raw_mode
[REGNO (reg
)])
4183 /* The register must belong to RCLASS and have a higher number
4184 than the register in the previous SET. */
4185 if (!TEST_HARD_REG_BIT (reg_class_contents
[rclass
], REGNO (reg
))
4186 || REGNO (last_reg
) >= REGNO (reg
))
4191 /* Work out which register class we need. */
4192 if (INT_REGNO_P (REGNO (reg
)))
4193 rclass
= GENERAL_REGS
;
4194 else if (FP_REGNO_P (REGNO (reg
)))
4201 offset
+= GET_MODE_SIZE (GET_MODE (reg
));
4204 /* If we have an automodification, check whether the final offset is OK. */
4205 if (automod_base
&& offset
!= (automod_offset
< 0 ? 0 : automod_offset
))
4208 /* Reject unprofitable cases. */
4209 if (len
< first
+ (rclass
== FP_REGS
? MIN_FMOVEM_REGS
: MIN_MOVEM_REGS
))
4215 /* Return the assembly code template for a movem or fmovem instruction
4216 whose pattern is given by PATTERN. Store the template's operands
4219 If the instruction uses post-increment or pre-decrement addressing,
4220 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
4221 is true if this is a store instruction. */
4224 m68k_output_movem (rtx
*operands
, rtx pattern
,
4225 HOST_WIDE_INT automod_offset
, bool store_p
)
4230 gcc_assert (GET_CODE (pattern
) == PARALLEL
);
4232 first
= (automod_offset
!= 0);
4233 for (i
= first
; i
< XVECLEN (pattern
, 0); i
++)
4235 /* When using movem with pre-decrement addressing, register X + D0_REG
4236 is controlled by bit 15 - X. For all other addressing modes,
4237 register X + D0_REG is controlled by bit X. Confusingly, the
4238 register mask for fmovem is in the opposite order to that for
4242 gcc_assert (MEM_P (XEXP (XVECEXP (pattern
, 0, i
), !store_p
)));
4243 gcc_assert (REG_P (XEXP (XVECEXP (pattern
, 0, i
), store_p
)));
4244 regno
= REGNO (XEXP (XVECEXP (pattern
, 0, i
), store_p
));
4245 if (automod_offset
< 0)
4247 if (FP_REGNO_P (regno
))
4248 mask
|= 1 << (regno
- FP0_REG
);
4250 mask
|= 1 << (15 - (regno
- D0_REG
));
4254 if (FP_REGNO_P (regno
))
4255 mask
|= 1 << (7 - (regno
- FP0_REG
));
4257 mask
|= 1 << (regno
- D0_REG
);
4262 if (automod_offset
== 0)
4263 operands
[0] = XEXP (XEXP (XVECEXP (pattern
, 0, first
), !store_p
), 0);
4264 else if (automod_offset
< 0)
4265 operands
[0] = gen_rtx_PRE_DEC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4267 operands
[0] = gen_rtx_POST_INC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4268 operands
[1] = GEN_INT (mask
);
4269 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern
, 0, first
), store_p
))))
4272 return "fmovem %1,%a0";
4274 return "fmovem %a0,%1";
4279 return "movem%.l %1,%a0";
4281 return "movem%.l %a0,%1";
4285 /* Return a REG that occurs in ADDR with coefficient 1.
4286 ADDR can be effectively incremented by incrementing REG. */
4289 find_addr_reg (rtx addr
)
4291 while (GET_CODE (addr
) == PLUS
)
4293 if (GET_CODE (XEXP (addr
, 0)) == REG
)
4294 addr
= XEXP (addr
, 0);
4295 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
4296 addr
= XEXP (addr
, 1);
4297 else if (CONSTANT_P (XEXP (addr
, 0)))
4298 addr
= XEXP (addr
, 1);
4299 else if (CONSTANT_P (XEXP (addr
, 1)))
4300 addr
= XEXP (addr
, 0);
4304 gcc_assert (GET_CODE (addr
) == REG
);
4308 /* Output assembler code to perform a 32-bit 3-operand add. */
4311 output_addsi3 (rtx
*operands
)
4313 if (! operands_match_p (operands
[0], operands
[1]))
4315 if (!ADDRESS_REG_P (operands
[1]))
4317 rtx tmp
= operands
[1];
4319 operands
[1] = operands
[2];
4323 /* These insns can result from reloads to access
4324 stack slots over 64k from the frame pointer. */
4325 if (GET_CODE (operands
[2]) == CONST_INT
4326 && (INTVAL (operands
[2]) < -32768 || INTVAL (operands
[2]) > 32767))
4327 return "move%.l %2,%0\n\tadd%.l %1,%0";
4328 if (GET_CODE (operands
[2]) == REG
)
4329 return MOTOROLA
? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4330 return MOTOROLA
? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4332 if (GET_CODE (operands
[2]) == CONST_INT
)
4334 if (INTVAL (operands
[2]) > 0
4335 && INTVAL (operands
[2]) <= 8)
4336 return "addq%.l %2,%0";
4337 if (INTVAL (operands
[2]) < 0
4338 && INTVAL (operands
[2]) >= -8)
4340 operands
[2] = GEN_INT (- INTVAL (operands
[2]));
4341 return "subq%.l %2,%0";
4343 /* On the CPU32 it is faster to use two addql instructions to
4344 add a small integer (8 < N <= 16) to a register.
4345 Likewise for subql. */
4346 if (TUNE_CPU32
&& REG_P (operands
[0]))
4348 if (INTVAL (operands
[2]) > 8
4349 && INTVAL (operands
[2]) <= 16)
4351 operands
[2] = GEN_INT (INTVAL (operands
[2]) - 8);
4352 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4354 if (INTVAL (operands
[2]) < -8
4355 && INTVAL (operands
[2]) >= -16)
4357 operands
[2] = GEN_INT (- INTVAL (operands
[2]) - 8);
4358 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4361 if (ADDRESS_REG_P (operands
[0])
4362 && INTVAL (operands
[2]) >= -0x8000
4363 && INTVAL (operands
[2]) < 0x8000)
4366 return "add%.w %2,%0";
4368 return MOTOROLA
? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4371 return "add%.l %2,%0";
4374 /* Emit a comparison between OP0 and OP1. Return true iff the comparison
4375 was reversed. SC1 is an SImode scratch reg, and SC2 a DImode scratch reg,
4376 as needed. CODE is the code of the comparison, we return it unchanged or
4377 swapped, as necessary. */
4379 m68k_output_compare_di (rtx op0
, rtx op1
, rtx sc1
, rtx sc2
, rtx_insn
*insn
,
4387 if (op1
== const0_rtx
)
4389 if (!REG_P (op0
) || ADDRESS_REG_P (op0
))
4395 output_move_double (xoperands
);
4396 output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", xoperands
);
4397 return swap_condition (code
);
4399 if (find_reg_note (insn
, REG_DEAD
, op0
))
4401 output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", ops
);
4402 return swap_condition (code
);
4406 /* 'sub' clears %1, and also clears the X cc bit.
4407 'tst' sets the Z cc bit according to the low part of the DImode
4409 'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high
4411 output_asm_insn ("sub%.l %2,%2\n\ttst%.l %R0\n\tsubx%.l %2,%0", ops
);
4416 if (rtx_equal_p (sc2
, op0
))
4418 output_asm_insn ("sub%.l %R1,%R3\n\tsubx%.l %1,%3", ops
);
4423 output_asm_insn ("sub%.l %R0,%R3\n\tsubx%.l %0,%3", ops
);
4424 return swap_condition (code
);
4429 remember_compare_flags (rtx op0
, rtx op1
)
4431 if (side_effects_p (op0
) || side_effects_p (op1
))
4435 flags_compare_op0
= op0
;
4436 flags_compare_op1
= op1
;
4437 flags_operand1
= flags_operand2
= NULL_RTX
;
4438 flags_valid
= FLAGS_VALID_SET
;
4442 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4443 comparison. It is returned, potentially modified if necessary. */
4445 m68k_output_compare_si (rtx op0
, rtx op1
, rtx_code code
)
4447 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4451 remember_compare_flags (op0
, op1
);
4456 if (op1
== const0_rtx
&& (TARGET_68020
|| TARGET_COLDFIRE
|| !ADDRESS_REG_P (op0
)))
4457 output_asm_insn ("tst%.l %0", ops
);
4458 else if (GET_CODE (op0
) == MEM
&& GET_CODE (op1
) == MEM
)
4459 output_asm_insn ("cmpm%.l %1,%0", ops
);
4460 else if (REG_P (op1
)
4461 || (!REG_P (op0
) && GET_CODE (op0
) != MEM
))
4463 output_asm_insn ("cmp%.l %d0,%d1", ops
);
4464 std::swap (flags_compare_op0
, flags_compare_op1
);
4465 return swap_condition (code
);
4467 else if (!TARGET_COLDFIRE
4468 && ADDRESS_REG_P (op0
)
4469 && GET_CODE (op1
) == CONST_INT
4470 && INTVAL (op1
) < 0x8000
4471 && INTVAL (op1
) >= -0x8000)
4472 output_asm_insn ("cmp%.w %1,%0", ops
);
4474 output_asm_insn ("cmp%.l %d1,%d0", ops
);
4478 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4479 comparison. It is returned, potentially modified if necessary. */
4481 m68k_output_compare_hi (rtx op0
, rtx op1
, rtx_code code
)
4483 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4487 remember_compare_flags (op0
, op1
);
4492 if (op1
== const0_rtx
)
4493 output_asm_insn ("tst%.w %d0", ops
);
4494 else if (GET_CODE (op0
) == MEM
&& GET_CODE (op1
) == MEM
)
4495 output_asm_insn ("cmpm%.w %1,%0", ops
);
4496 else if ((REG_P (op1
) && !ADDRESS_REG_P (op1
))
4497 || (!REG_P (op0
) && GET_CODE (op0
) != MEM
))
4499 output_asm_insn ("cmp%.w %d0,%d1", ops
);
4500 std::swap (flags_compare_op0
, flags_compare_op1
);
4501 return swap_condition (code
);
4504 output_asm_insn ("cmp%.w %d1,%d0", ops
);
4508 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4509 comparison. It is returned, potentially modified if necessary. */
4511 m68k_output_compare_qi (rtx op0
, rtx op1
, rtx_code code
)
4513 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4517 remember_compare_flags (op0
, op1
);
4522 if (op1
== const0_rtx
)
4523 output_asm_insn ("tst%.b %d0", ops
);
4524 else if (GET_CODE (op0
) == MEM
&& GET_CODE (op1
) == MEM
)
4525 output_asm_insn ("cmpm%.b %1,%0", ops
);
4526 else if (REG_P (op1
) || (!REG_P (op0
) && GET_CODE (op0
) != MEM
))
4528 output_asm_insn ("cmp%.b %d0,%d1", ops
);
4529 std::swap (flags_compare_op0
, flags_compare_op1
);
4530 return swap_condition (code
);
4533 output_asm_insn ("cmp%.b %d1,%d0", ops
);
4537 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4538 comparison. It is returned, potentially modified if necessary. */
4540 m68k_output_compare_fp (rtx op0
, rtx op1
, rtx_code code
)
4542 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4550 remember_compare_flags (op0
, op1
);
4552 machine_mode mode
= GET_MODE (op0
);
4553 std::string prec
= mode
== SFmode
? "s" : mode
== DFmode
? "d" : "x";
4555 if (op1
== CONST0_RTX (GET_MODE (op0
)))
4559 if (TARGET_COLDFIRE_FPU
)
4560 output_asm_insn ("ftst%.d %0", ops
);
4562 output_asm_insn ("ftst%.x %0", ops
);
4565 output_asm_insn (("ftst%." + prec
+ " %0").c_str (), ops
);
4569 switch (which_alternative
)
4572 if (TARGET_COLDFIRE_FPU
)
4573 output_asm_insn ("fcmp%.d %1,%0", ops
);
4575 output_asm_insn ("fcmp%.x %1,%0", ops
);
4578 output_asm_insn (("fcmp%." + prec
+ " %f1,%0").c_str (), ops
);
4581 output_asm_insn (("fcmp%." + prec
+ " %0,%f1").c_str (), ops
);
4582 std::swap (flags_compare_op0
, flags_compare_op1
);
4583 return swap_condition (code
);
4585 /* This is the ftst case, handled earlier. */
/* NOTE(review): the bodies of the following six helpers — presumably
   switch statements mapping an rtx_code to an assembler template string —
   were lost in the extraction that produced this text.  Only the header
   comments and signatures survive below; restore the bodies from the
   upstream source before compiling.  */
4591 /* Return an output template for a branch with CODE. */
4593 m68k_output_branch_integer (rtx_code code
)
4626 /* Return an output template for a reversed branch with CODE. */
4628 m68k_output_branch_integer_rev (rtx_code code
)
4661 /* Return an output template for a scc instruction with CODE. */
4663 m68k_output_scc (rtx_code code
)
4696 /* Return an output template for a floating point branch
4697 instruction with CODE. */
4699 m68k_output_branch_float (rtx_code code
)
4736 /* Return an output template for a reversed floating point branch
4737 instruction with CODE. */
4739 m68k_output_branch_float_rev (rtx_code code
)
4776 /* Return an output template for a floating point scc
4777 instruction with CODE. */
4779 m68k_output_scc_float (rtx_code code
)
4819 output_move_const_double (rtx
*operands
)
4821 int code
= standard_68881_constant_p (operands
[1]);
4825 static char buf
[40];
4827 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4830 return "fmove%.d %1,%0";
4834 output_move_const_single (rtx
*operands
)
4836 int code
= standard_68881_constant_p (operands
[1]);
4840 static char buf
[40];
4842 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4845 return "fmove%.s %f1,%0";
4848 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4849 from the "fmovecr" instruction.
4850 The value, anded with 0xff, gives the code to use in fmovecr
4851 to get the desired constant. */
4853 /* This code has been fixed for cross-compilation. */
4855 static int inited_68881_table
= 0;
4857 static const char *const strings_68881
[7] = {
4867 static const int codes_68881
[7] = {
4877 REAL_VALUE_TYPE values_68881
[7];
4879 /* Set up values_68881 array by converting the decimal values
4880 strings_68881 to binary. */
4883 init_68881_table (void)
4890 for (i
= 0; i
< 7; i
++)
4894 r
= REAL_VALUE_ATOF (strings_68881
[i
], mode
);
4895 values_68881
[i
] = r
;
4897 inited_68881_table
= 1;
4901 standard_68881_constant_p (rtx x
)
4903 const REAL_VALUE_TYPE
*r
;
4906 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4907 used at all on those chips. */
4911 if (! inited_68881_table
)
4912 init_68881_table ();
4914 r
= CONST_DOUBLE_REAL_VALUE (x
);
4916 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4917 for (i
= 0; i
< 6; i
++)
4919 if (real_identical (r
, &values_68881
[i
]))
4920 return (codes_68881
[i
]);
4923 if (GET_MODE (x
) == SFmode
)
4926 if (real_equal (r
, &values_68881
[6]))
4927 return (codes_68881
[6]);
4929 /* larger powers of ten in the constants ram are not used
4930 because they are not equal to a `double' C constant. */
4934 /* If X is a floating-point constant, return the logarithm of X base 2,
4935 or 0 if X is not a power of 2. */
4938 floating_exact_log2 (rtx x
)
4940 const REAL_VALUE_TYPE
*r
;
4944 r
= CONST_DOUBLE_REAL_VALUE (x
);
4946 if (real_less (r
, &dconst1
))
4949 exp
= real_exponent (r
);
4950 real_2expN (&r1
, exp
, DFmode
);
4951 if (real_equal (&r1
, r
))
4957 /* A C compound statement to output to stdio stream STREAM the
4958 assembler syntax for an instruction operand X. X is an RTL
4961 CODE is a value that can be used to specify one of several ways
4962 of printing the operand. It is used when identical operands
4963 must be printed differently depending on the context. CODE
4964 comes from the `%' specification that was used to request
4965 printing of the operand. If the specification was just `%DIGIT'
4966 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4967 is the ASCII code for LTR.
4969 If X is a register, this macro should print the register's name.
4970 The names can be found in an array `reg_names' whose type is
4971 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4973 When the machine description has a specification `%PUNCT' (a `%'
4974 followed by a punctuation character), this macro is called with
4975 a null pointer for X and the punctuation character for CODE.
4977 The m68k specific codes are:
4979 '.' for dot needed in Motorola-style opcode names.
4980 '-' for an operand pushing on the stack:
4981 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4982 '+' for an operand pushing on the stack:
4983 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4984 '@' for a reference to the top word on the stack:
4985 sp@, (sp) or (%sp) depending on the style of syntax.
4986 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4987 but & in SGS syntax).
4988 '!' for the cc register (used in an `and to cc' insn).
4989 '$' for the letter `s' in an op code, but only on the 68040.
4990 '&' for the letter `d' in an op code, but only on the 68040.
4991 '/' for register prefix needed by longlong.h.
4992 '?' for m68k_library_id_string
4994 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4995 'd' to force memory addressing to be absolute, not relative.
4996 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4997 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4998 or print pair of registers as rx:ry.
4999 'p' print an address with @PLTPC attached, but only if the operand
5000 is not locally-bound. */
5003 print_operand (FILE *file
, rtx op
, int letter
)
5006 m68k_adjust_decorated_operand (op
);
5011 fprintf (file
, ".");
5013 else if (letter
== '#')
5014 asm_fprintf (file
, "%I");
5015 else if (letter
== '-')
5016 asm_fprintf (file
, MOTOROLA
? "-(%Rsp)" : "%Rsp@-");
5017 else if (letter
== '+')
5018 asm_fprintf (file
, MOTOROLA
? "(%Rsp)+" : "%Rsp@+");
5019 else if (letter
== '@')
5020 asm_fprintf (file
, MOTOROLA
? "(%Rsp)" : "%Rsp@");
5021 else if (letter
== '!')
5022 asm_fprintf (file
, "%Rfpcr");
5023 else if (letter
== '$')
5026 fprintf (file
, "s");
5028 else if (letter
== '&')
5031 fprintf (file
, "d");
5033 else if (letter
== '/')
5034 asm_fprintf (file
, "%R");
5035 else if (letter
== '?')
5036 asm_fprintf (file
, m68k_library_id_string
);
5037 else if (letter
== 'p')
5039 output_addr_const (file
, op
);
5040 if (!(GET_CODE (op
) == SYMBOL_REF
&& SYMBOL_REF_LOCAL_P (op
)))
5041 fprintf (file
, "@PLTPC");
5043 else if (GET_CODE (op
) == REG
)
5046 /* Print out the second register name of a register pair.
5047 I.e., R (6) => 7. */
5048 fputs (M68K_REGNAME(REGNO (op
) + 1), file
);
5050 fputs (M68K_REGNAME(REGNO (op
)), file
);
5052 else if (GET_CODE (op
) == MEM
)
5054 output_address (GET_MODE (op
), XEXP (op
, 0));
5055 if (letter
== 'd' && ! TARGET_68020
5056 && CONSTANT_ADDRESS_P (XEXP (op
, 0))
5057 && !(GET_CODE (XEXP (op
, 0)) == CONST_INT
5058 && INTVAL (XEXP (op
, 0)) < 0x8000
5059 && INTVAL (XEXP (op
, 0)) >= -0x8000))
5060 fprintf (file
, MOTOROLA
? ".l" : ":l");
5062 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == SFmode
)
5065 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5066 asm_fprintf (file
, "%I0x%lx", l
& 0xFFFFFFFF);
5068 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == XFmode
)
5071 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5072 asm_fprintf (file
, "%I0x%lx%08lx%08lx", l
[0] & 0xFFFFFFFF,
5073 l
[1] & 0xFFFFFFFF, l
[2] & 0xFFFFFFFF);
5075 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == DFmode
)
5078 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5079 asm_fprintf (file
, "%I0x%lx%08lx", l
[0] & 0xFFFFFFFF, l
[1] & 0xFFFFFFFF);
5083 /* Use `print_operand_address' instead of `output_addr_const'
5084 to ensure that we print relevant PIC stuff. */
5085 asm_fprintf (file
, "%I");
5087 && (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == CONST
))
5088 print_operand_address (file
, op
);
5090 output_addr_const (file
, op
);
/* NOTE(review): the body of this function — presumably a switch mapping
   each m68k_reloc value to its assembler relocation suffix string — was
   lost in the extraction that produced this text.  Only the signature,
   the assert and one condition fragment survive; restore the body from
   upstream before compiling.  */
5094 /* Return string for TLS relocation RELOC. */
5097 m68k_get_reloc_decoration (enum m68k_reloc reloc
)
5099 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
5100 gcc_assert (MOTOROLA
|| reloc
== RELOC_GOT
);
5107 if (flag_pic
== 1 && TARGET_68020
)
5149 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
5152 m68k_output_addr_const_extra (FILE *file
, rtx x
)
5154 if (GET_CODE (x
) == UNSPEC
)
5156 switch (XINT (x
, 1))
5158 case UNSPEC_RELOC16
:
5159 case UNSPEC_RELOC32
:
5160 output_addr_const (file
, XVECEXP (x
, 0, 0));
5161 fputs (m68k_get_reloc_decoration
5162 ((enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1))), file
);
5173 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
5176 m68k_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
5178 gcc_assert (size
== 4);
5179 fputs ("\t.long\t", file
);
5180 output_addr_const (file
, x
);
5181 fputs ("@TLSLDO+0x8000", file
);
5184 /* In the name of slightly smaller debug output, and to cater to
5185 general assembler lossage, recognize various UNSPEC sequences
5186 and turn them back into a direct symbol reference. */
5189 m68k_delegitimize_address (rtx orig_x
)
5192 struct m68k_address addr
;
5195 orig_x
= delegitimize_mem_from_attrs (orig_x
);
5200 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
5203 if (!m68k_decompose_address (GET_MODE (x
), x
, false, &addr
)
5204 || addr
.offset
== NULL_RTX
5205 || GET_CODE (addr
.offset
) != CONST
)
5208 unspec
= XEXP (addr
.offset
, 0);
5209 if (GET_CODE (unspec
) == PLUS
&& CONST_INT_P (XEXP (unspec
, 1)))
5210 unspec
= XEXP (unspec
, 0);
5211 if (GET_CODE (unspec
) != UNSPEC
5212 || (XINT (unspec
, 1) != UNSPEC_RELOC16
5213 && XINT (unspec
, 1) != UNSPEC_RELOC32
))
5215 x
= XVECEXP (unspec
, 0, 0);
5216 gcc_assert (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
);
5217 if (unspec
!= XEXP (addr
.offset
, 0))
5218 x
= gen_rtx_PLUS (Pmode
, x
, XEXP (XEXP (addr
.offset
, 0), 1));
5221 rtx idx
= addr
.index
;
5222 if (addr
.scale
!= 1)
5223 idx
= gen_rtx_MULT (Pmode
, idx
, GEN_INT (addr
.scale
));
5224 x
= gen_rtx_PLUS (Pmode
, idx
, x
);
5227 x
= gen_rtx_PLUS (Pmode
, addr
.base
, x
);
5229 x
= replace_equiv_address_nv (orig_x
, x
);
5234 /* A C compound statement to output to stdio stream STREAM the
5235 assembler syntax for an instruction operand that is a memory
5236 reference whose address is ADDR. ADDR is an RTL expression.
5238 Note that this contains a kludge that knows that the only reason
5239 we have an address (plus (label_ref...) (reg...)) when not generating
5240 PIC code is in the insn before a tablejump, and we know that m68k.md
5241 generates a label LInnn: on such an insn.
5243 It is possible for PIC to generate a (plus (label_ref...) (reg...))
5244 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
5246 This routine is responsible for distinguishing between -fpic and -fPIC
5247 style relocations in an address. When generating -fpic code the
5248 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
5249 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
5252 print_operand_address (FILE *file
, rtx addr
)
5254 struct m68k_address address
;
5256 m68k_adjust_decorated_operand (addr
);
5258 if (!m68k_decompose_address (QImode
, addr
, true, &address
))
5261 if (address
.code
== PRE_DEC
)
5262 fprintf (file
, MOTOROLA
? "-(%s)" : "%s@-",
5263 M68K_REGNAME (REGNO (address
.base
)));
5264 else if (address
.code
== POST_INC
)
5265 fprintf (file
, MOTOROLA
? "(%s)+" : "%s@+",
5266 M68K_REGNAME (REGNO (address
.base
)));
5267 else if (!address
.base
&& !address
.index
)
5269 /* A constant address. */
5270 gcc_assert (address
.offset
== addr
);
5271 if (GET_CODE (addr
) == CONST_INT
)
5273 /* (xxx).w or (xxx).l. */
5274 if (IN_RANGE (INTVAL (addr
), -0x8000, 0x7fff))
5275 fprintf (file
, MOTOROLA
? "%d.w" : "%d:w", (int) INTVAL (addr
));
5277 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (addr
));
5279 else if (TARGET_PCREL
)
5281 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
5283 output_addr_const (file
, addr
);
5284 asm_fprintf (file
, flag_pic
== 1 ? ":w,%Rpc)" : ":l,%Rpc)");
5288 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
5289 name ends in `.<letter>', as the last 2 characters can be
5290 mistaken as a size suffix. Put the name in parentheses. */
5291 if (GET_CODE (addr
) == SYMBOL_REF
5292 && strlen (XSTR (addr
, 0)) > 2
5293 && XSTR (addr
, 0)[strlen (XSTR (addr
, 0)) - 2] == '.')
5296 output_addr_const (file
, addr
);
5300 output_addr_const (file
, addr
);
5307 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
5308 label being accessed, otherwise it is -1. */
5309 labelno
= (address
.offset
5311 && GET_CODE (address
.offset
) == LABEL_REF
5312 ? CODE_LABEL_NUMBER (XEXP (address
.offset
, 0))
5316 /* Print the "offset(base" component. */
5318 asm_fprintf (file
, "%LL%d(%Rpc,", labelno
);
5322 output_addr_const (file
, address
.offset
);
5326 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
5328 /* Print the ",index" component, if any. */
5333 fprintf (file
, "%s.%c",
5334 M68K_REGNAME (REGNO (address
.index
)),
5335 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
5336 if (address
.scale
!= 1)
5337 fprintf (file
, "*%d", address
.scale
);
5341 else /* !MOTOROLA */
5343 if (!address
.offset
&& !address
.index
)
5344 fprintf (file
, "%s@", M68K_REGNAME (REGNO (address
.base
)));
5347 /* Print the "base@(offset" component. */
5349 asm_fprintf (file
, "%Rpc@(%LL%d", labelno
);
5353 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
5354 fprintf (file
, "@(");
5356 output_addr_const (file
, address
.offset
);
5358 /* Print the ",index" component, if any. */
5361 fprintf (file
, ",%s:%c",
5362 M68K_REGNAME (REGNO (address
.index
)),
5363 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
5364 if (address
.scale
!= 1)
5365 fprintf (file
, ":%d", address
.scale
);
5373 /* Check for cases where a clr insns can be omitted from code using
5374 strict_low_part sets. For example, the second clrl here is not needed:
5375 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
5377 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
5378 insn we are checking for redundancy. TARGET is the register set by the
5382 strict_low_part_peephole_ok (machine_mode mode
, rtx_insn
*first_insn
,
5385 rtx_insn
*p
= first_insn
;
5387 while ((p
= PREV_INSN (p
)))
5389 if (NOTE_INSN_BASIC_BLOCK_P (p
))
5395 /* If it isn't an insn, then give up. */
5399 if (reg_set_p (target
, p
))
5401 rtx set
= single_set (p
);
5404 /* If it isn't an easy to recognize insn, then give up. */
5408 dest
= SET_DEST (set
);
5410 /* If this sets the entire target register to zero, then our
5411 first_insn is redundant. */
5412 if (rtx_equal_p (dest
, target
)
5413 && SET_SRC (set
) == const0_rtx
)
5415 else if (GET_CODE (dest
) == STRICT_LOW_PART
5416 && GET_CODE (XEXP (dest
, 0)) == REG
5417 && REGNO (XEXP (dest
, 0)) == REGNO (target
)
5418 && (GET_MODE_SIZE (GET_MODE (XEXP (dest
, 0)))
5419 <= GET_MODE_SIZE (mode
)))
5420 /* This is a strict low part set which modifies less than
5421 we are using, so it is safe. */
5431 /* Operand predicates for implementing asymmetric pc-relative addressing
5432 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
5433 when used as a source operand, but not as a destination operand.
5435 We model this by restricting the meaning of the basic predicates
5436 (general_operand, memory_operand, etc) to forbid the use of this
5437 addressing mode, and then define the following predicates that permit
5438 this addressing mode. These predicates can then be used for the
5439 source operands of the appropriate instructions.
5441 n.b. While it is theoretically possible to change all machine patterns
5442 to use this addressing mode where permitted by the architecture,
5443 it has only been implemented for "common" cases: SImode, HImode, and
5444 QImode operands, and only for the principal operations that would
5445 require this addressing mode: data movement and simple integer operations.
5447 In parallel with these new predicates, two new constraint letters
5448 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
5449 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
5450 In the pcrel case 's' is only valid in combination with 'a' registers.
5451 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
5452 of how these constraints are used.
5454 The use of these predicates is strictly optional, though patterns that
5455 don't will cause an extra reload register to be allocated where one
5458 lea (abc:w,%pc),%a0 ; need to reload address
5459 moveq &1,%d1 ; since write to pc-relative space
5460 movel %d1,%a0@ ; is not allowed
5462 lea (abc:w,%pc),%a1 ; no need to reload address here
5463 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
5465 For more info, consult tiemann@cygnus.com.
5468 All of the ugliness with predicates and constraints is due to the
5469 simple fact that the m68k does not allow a pc-relative addressing
5470 mode as a destination. gcc does not distinguish between source and
5471 destination addresses. Hence, if we claim that pc-relative address
5472 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
5473 end up with invalid code. To get around this problem, we left
5474 pc-relative modes as invalid addresses, and then added special
5475 predicates and constraints to accept them.
5477 A cleaner way to handle this is to modify gcc to distinguish
5478 between source and destination addresses. We can then say that
5479 pc-relative is a valid source address but not a valid destination
5480 address, and hopefully avoid a lot of the predicate and constraint
5481 hackery. Unfortunately, this would be a pretty big change. It would
5482 be a useful change for a number of ports, but there aren't any current
5483 plans to undertake this.
5485 ***************************************************************************/
5489 output_andsi3 (rtx
*operands
)
5493 if (GET_CODE (operands
[2]) == CONST_INT
5494 && (INTVAL (operands
[2]) | 0xffff) == -1
5495 && (DATA_REG_P (operands
[0])
5496 || offsettable_memref_p (operands
[0]))
5497 && !TARGET_COLDFIRE
)
5499 if (GET_CODE (operands
[0]) != REG
)
5500 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5501 operands
[2] = GEN_INT (INTVAL (operands
[2]) & 0xffff);
5502 if (operands
[2] == const0_rtx
)
5504 return "and%.w %2,%0";
5506 if (GET_CODE (operands
[2]) == CONST_INT
5507 && (logval
= exact_log2 (~ INTVAL (operands
[2]) & 0xffffffff)) >= 0
5508 && (DATA_REG_P (operands
[0])
5509 || offsettable_memref_p (operands
[0])))
5511 if (DATA_REG_P (operands
[0]))
5512 operands
[1] = GEN_INT (logval
);
5515 operands
[0] = adjust_address (operands
[0], QImode
, 3 - (logval
/ 8));
5516 operands
[1] = GEN_INT (logval
% 8);
5518 return "bclr %1,%0";
5520 /* Only a standard logical operation on the whole word sets the
5521 condition codes in a way we can use. */
5522 if (!side_effects_p (operands
[0]))
5523 flags_operand1
= operands
[0];
5524 flags_valid
= FLAGS_VALID_YES
;
5525 return "and%.l %2,%0";
5529 output_iorsi3 (rtx
*operands
)
5533 if (GET_CODE (operands
[2]) == CONST_INT
5534 && INTVAL (operands
[2]) >> 16 == 0
5535 && (DATA_REG_P (operands
[0])
5536 || offsettable_memref_p (operands
[0]))
5537 && !TARGET_COLDFIRE
)
5539 if (GET_CODE (operands
[0]) != REG
)
5540 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5541 if (INTVAL (operands
[2]) == 0xffff)
5542 return "mov%.w %2,%0";
5543 return "or%.w %2,%0";
5545 if (GET_CODE (operands
[2]) == CONST_INT
5546 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
5547 && (DATA_REG_P (operands
[0])
5548 || offsettable_memref_p (operands
[0])))
5550 if (DATA_REG_P (operands
[0]))
5551 operands
[1] = GEN_INT (logval
);
5554 operands
[0] = adjust_address (operands
[0], QImode
, 3 - (logval
/ 8));
5555 operands
[1] = GEN_INT (logval
% 8);
5557 return "bset %1,%0";
5559 /* Only a standard logical operation on the whole word sets the
5560 condition codes in a way we can use. */
5561 if (!side_effects_p (operands
[0]))
5562 flags_operand1
= operands
[0];
5563 flags_valid
= FLAGS_VALID_YES
;
5564 return "or%.l %2,%0";
5568 output_xorsi3 (rtx
*operands
)
5572 if (GET_CODE (operands
[2]) == CONST_INT
5573 && INTVAL (operands
[2]) >> 16 == 0
5574 && (offsettable_memref_p (operands
[0]) || DATA_REG_P (operands
[0]))
5575 && !TARGET_COLDFIRE
)
5577 if (! DATA_REG_P (operands
[0]))
5578 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5579 if (INTVAL (operands
[2]) == 0xffff)
5581 return "eor%.w %2,%0";
5583 if (GET_CODE (operands
[2]) == CONST_INT
5584 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
5585 && (DATA_REG_P (operands
[0])
5586 || offsettable_memref_p (operands
[0])))
5588 if (DATA_REG_P (operands
[0]))
5589 operands
[1] = GEN_INT (logval
);
5592 operands
[0] = adjust_address (operands
[0], QImode
, 3 - (logval
/ 8));
5593 operands
[1] = GEN_INT (logval
% 8);
5595 return "bchg %1,%0";
5597 /* Only a standard logical operation on the whole word sets the
5598 condition codes in a way we can use. */
5599 if (!side_effects_p (operands
[0]))
5600 flags_operand1
= operands
[0];
5601 flags_valid
= FLAGS_VALID_YES
;
5602 return "eor%.l %2,%0";
5605 /* Return the instruction that should be used for a call to address X,
5606 which is known to be in operand 0. */
5611 if (symbolic_operand (x
, VOIDmode
))
5612 return m68k_symbolic_call
;
5617 /* Likewise sibling calls. */
5620 output_sibcall (rtx x
)
5622 if (symbolic_operand (x
, VOIDmode
))
5623 return m68k_symbolic_jump
;
5629 m68k_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
5630 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
5633 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk
));
5634 rtx this_slot
, offset
, addr
, mem
, tmp
;
5637 /* Avoid clobbering the struct value reg by using the
5638 static chain reg as a temporary. */
5639 tmp
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
5641 /* Pretend to be a post-reload pass while generating rtl. */
5642 reload_completed
= 1;
5644 /* The "this" pointer is stored at 4(%sp). */
5645 this_slot
= gen_rtx_MEM (Pmode
, plus_constant (Pmode
,
5646 stack_pointer_rtx
, 4));
5648 /* Add DELTA to THIS. */
5651 /* Make the offset a legitimate operand for memory addition. */
5652 offset
= GEN_INT (delta
);
5653 if ((delta
< -8 || delta
> 8)
5654 && (TARGET_COLDFIRE
|| USE_MOVQ (delta
)))
5656 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), offset
);
5657 offset
= gen_rtx_REG (Pmode
, D0_REG
);
5659 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5660 copy_rtx (this_slot
), offset
));
5663 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5664 if (vcall_offset
!= 0)
5666 /* Set the static chain register to *THIS. */
5667 emit_move_insn (tmp
, this_slot
);
5668 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
5670 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5671 addr
= plus_constant (Pmode
, tmp
, vcall_offset
);
5672 if (!m68k_legitimate_address_p (Pmode
, addr
, true))
5674 emit_insn (gen_rtx_SET (tmp
, addr
));
5678 /* Load the offset into %d0 and add it to THIS. */
5679 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
),
5680 gen_rtx_MEM (Pmode
, addr
));
5681 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5682 copy_rtx (this_slot
),
5683 gen_rtx_REG (Pmode
, D0_REG
)));
5686 /* Jump to the target function. Use a sibcall if direct jumps are
5687 allowed, otherwise load the address into a register first. */
5688 mem
= DECL_RTL (function
);
5689 if (!sibcall_operand (XEXP (mem
, 0), VOIDmode
))
5691 gcc_assert (flag_pic
);
5693 if (!TARGET_SEP_DATA
)
5695 /* Use the static chain register as a temporary (call-clobbered)
5696 GOT pointer for this function. We can use the static chain
5697 register because it isn't live on entry to the thunk. */
5698 SET_REGNO (pic_offset_table_rtx
, STATIC_CHAIN_REGNUM
);
5699 emit_insn (gen_load_got (pic_offset_table_rtx
));
5701 legitimize_pic_address (XEXP (mem
, 0), Pmode
, tmp
);
5702 mem
= replace_equiv_address (mem
, tmp
);
5704 insn
= emit_call_insn (gen_sibcall (mem
, const0_rtx
));
5705 SIBLING_CALL_P (insn
) = 1;
5707 /* Run just enough of rest_of_compilation. */
5708 insn
= get_insns ();
5709 split_all_insns_noflow ();
5710 assemble_start_function (thunk
, fnname
);
5711 final_start_function (insn
, file
, 1);
5712 final (insn
, file
, 1);
5713 final_end_function ();
5714 assemble_end_function (thunk
, fnname
);
5716 /* Clean up the vars set above. */
5717 reload_completed
= 0;
5719 /* Restore the original PIC register. */
5721 SET_REGNO (pic_offset_table_rtx
, PIC_REG
);
5724 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5727 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED
,
5728 int incoming ATTRIBUTE_UNUSED
)
5730 return gen_rtx_REG (Pmode
, M68K_STRUCT_VALUE_REGNUM
);
5733 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5735 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED
,
5736 unsigned int new_reg
)
5739 /* Interrupt functions can only use registers that have already been
5740 saved by the prologue, even if they would normally be
5743 if ((m68k_get_function_kind (current_function_decl
)
5744 == m68k_fk_interrupt_handler
)
5745 && !df_regs_ever_live_p (new_reg
))
5751 /* Implement TARGET_HARD_REGNO_NREGS.
5753 On the m68k, ordinary registers hold 32 bits worth;
5754 for the 68881 registers, a single register is always enough for
5755 anything that can be stored in them at all. */
5758 m68k_hard_regno_nregs (unsigned int regno
, machine_mode mode
)
5761 return GET_MODE_NUNITS (mode
);
5762 return CEIL (GET_MODE_SIZE (mode
), UNITS_PER_WORD
);
5765 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5766 registers can hold any mode, but restrict the 68881 registers to
5767 floating-point modes. */
5770 m68k_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
5772 if (DATA_REGNO_P (regno
))
5774 /* Data Registers, can hold aggregate if fits in. */
5775 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 8)
5778 else if (ADDRESS_REGNO_P (regno
))
5780 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 16)
5783 else if (FP_REGNO_P (regno
))
5785 /* FPU registers, hold float or complex float of long double or
5787 if ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5788 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5789 && GET_MODE_UNIT_SIZE (mode
) <= TARGET_FP_REG_SIZE
)
5795 /* Implement TARGET_MODES_TIEABLE_P. */
5798 m68k_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
5800 return (!TARGET_HARD_FLOAT
5801 || ((GET_MODE_CLASS (mode1
) == MODE_FLOAT
5802 || GET_MODE_CLASS (mode1
) == MODE_COMPLEX_FLOAT
)
5803 == (GET_MODE_CLASS (mode2
) == MODE_FLOAT
5804 || GET_MODE_CLASS (mode2
) == MODE_COMPLEX_FLOAT
)));
5807 /* Implement SECONDARY_RELOAD_CLASS. */
5810 m68k_secondary_reload_class (enum reg_class rclass
,
5811 machine_mode mode
, rtx x
)
5815 regno
= true_regnum (x
);
5817 /* If one operand of a movqi is an address register, the other
5818 operand must be a general register or constant. Other types
5819 of operand must be reloaded through a data register. */
5820 if (GET_MODE_SIZE (mode
) == 1
5821 && reg_classes_intersect_p (rclass
, ADDR_REGS
)
5822 && !(INT_REGNO_P (regno
) || CONSTANT_P (x
)))
5825 /* PC-relative addresses must be loaded into an address register first. */
5827 && !reg_class_subset_p (rclass
, ADDR_REGS
)
5828 && symbolic_operand (x
, VOIDmode
))
5834 /* Implement PREFERRED_RELOAD_CLASS. */
5837 m68k_preferred_reload_class (rtx x
, enum reg_class rclass
)
5839 enum reg_class secondary_class
;
5841 /* If RCLASS might need a secondary reload, try restricting it to
5842 a class that doesn't. */
5843 secondary_class
= m68k_secondary_reload_class (rclass
, GET_MODE (x
), x
);
5844 if (secondary_class
!= NO_REGS
5845 && reg_class_subset_p (secondary_class
, rclass
))
5846 return secondary_class
;
5848 /* Prefer to use moveq for in-range constants. */
5849 if (GET_CODE (x
) == CONST_INT
5850 && reg_class_subset_p (DATA_REGS
, rclass
)
5851 && IN_RANGE (INTVAL (x
), -0x80, 0x7f))
5854 /* ??? Do we really need this now? */
5855 if (GET_CODE (x
) == CONST_DOUBLE
5856 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
5858 if (TARGET_HARD_FLOAT
&& reg_class_subset_p (FP_REGS
, rclass
))
5867 /* Return floating point values in a 68881 register. This makes 68881 code
5868 a little bit faster. It also makes -msoft-float code incompatible with
5869 hard-float code, so people have to be careful not to mix the two.
5870 For ColdFire it was decided the ABI incompatibility is undesirable.
5871 If there is need for a hard-float ABI it is probably worth doing it
5872 properly and also passing function arguments in FP registers. */
5874 m68k_libcall_value (machine_mode mode
)
5881 return gen_rtx_REG (mode
, FP0_REG
);
5887 return gen_rtx_REG (mode
, m68k_libcall_value_in_a0_p
? A0_REG
: D0_REG
);
5890 /* Location in which function value is returned.
5891 NOTE: Due to differences in ABIs, don't call this function directly,
5892 use FUNCTION_VALUE instead. */
5894 m68k_function_value (const_tree valtype
, const_tree func ATTRIBUTE_UNUSED
)
5898 mode
= TYPE_MODE (valtype
);
5904 return gen_rtx_REG (mode
, FP0_REG
);
5910 /* If the function returns a pointer, push that into %a0. */
5911 if (func
&& POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func
))))
5912 /* For compatibility with the large body of existing code which
5913 does not always properly declare external functions returning
5914 pointer types, the m68k/SVR4 convention is to copy the value
5915 returned for pointer functions from a0 to d0 in the function
5916 epilogue, so that callers that have neglected to properly
5917 declare the callee can still find the correct return value in
5919 return gen_rtx_PARALLEL
5922 gen_rtx_EXPR_LIST (VOIDmode
,
5923 gen_rtx_REG (mode
, A0_REG
),
5925 gen_rtx_EXPR_LIST (VOIDmode
,
5926 gen_rtx_REG (mode
, D0_REG
),
5928 else if (POINTER_TYPE_P (valtype
))
5929 return gen_rtx_REG (mode
, A0_REG
);
5931 return gen_rtx_REG (mode
, D0_REG
);
5934 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5935 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5937 m68k_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
5939 machine_mode mode
= TYPE_MODE (type
);
5941 if (mode
== BLKmode
)
5944 /* If TYPE's known alignment is less than the alignment of MODE that
5945 would contain the structure, then return in memory. We need to
5946 do so to maintain the compatibility between code compiled with
5947 -mstrict-align and that compiled with -mno-strict-align. */
5948 if (AGGREGATE_TYPE_P (type
)
5949 && TYPE_ALIGN (type
) < GET_MODE_ALIGNMENT (mode
))
5956 /* CPU to schedule the program for. */
5957 enum attr_cpu m68k_sched_cpu
;
5959 /* MAC to schedule the program for. */
5960 enum attr_mac m68k_sched_mac
;
5968 /* Integer register. */
5974 /* Implicit mem reference (e.g. stack). */
5977 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5980 /* Memory with offset but without indexing. EA mode 5. */
5983 /* Memory with indexing. EA mode 6. */
5986 /* Memory referenced by absolute address. EA mode 7. */
5989 /* Immediate operand that doesn't require extension word. */
5992 /* Immediate 16 bit operand. */
5995 /* Immediate 32 bit operand. */
5999 /* Return type of memory ADDR_RTX refers to. */
6000 static enum attr_op_type
6001 sched_address_type (machine_mode mode
, rtx addr_rtx
)
6003 struct m68k_address address
;
6005 if (symbolic_operand (addr_rtx
, VOIDmode
))
6006 return OP_TYPE_MEM7
;
6008 if (!m68k_decompose_address (mode
, addr_rtx
,
6009 reload_completed
, &address
))
6011 gcc_assert (!reload_completed
);
6012 /* Reload will likely fix the address to be in the register. */
6013 return OP_TYPE_MEM234
;
6016 if (address
.scale
!= 0)
6017 return OP_TYPE_MEM6
;
6019 if (address
.base
!= NULL_RTX
)
6021 if (address
.offset
== NULL_RTX
)
6022 return OP_TYPE_MEM234
;
6024 return OP_TYPE_MEM5
;
6027 gcc_assert (address
.offset
!= NULL_RTX
);
6029 return OP_TYPE_MEM7
;
6032 /* Return X or Y (depending on OPX_P) operand of INSN. */
6034 sched_get_operand (rtx_insn
*insn
, bool opx_p
)
6038 if (recog_memoized (insn
) < 0)
6041 extract_constrain_insn_cached (insn
);
6044 i
= get_attr_opx (insn
);
6046 i
= get_attr_opy (insn
);
6048 if (i
>= recog_data
.n_operands
)
6051 return recog_data
.operand
[i
];
6054 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
6055 If ADDRESS_P is true, return type of memory location operand refers to. */
6056 static enum attr_op_type
6057 sched_attr_op_type (rtx_insn
*insn
, bool opx_p
, bool address_p
)
6061 op
= sched_get_operand (insn
, opx_p
);
6065 gcc_assert (!reload_completed
);
6070 return sched_address_type (QImode
, op
);
6072 if (memory_operand (op
, VOIDmode
))
6073 return sched_address_type (GET_MODE (op
), XEXP (op
, 0));
6075 if (register_operand (op
, VOIDmode
))
6077 if ((!reload_completed
&& FLOAT_MODE_P (GET_MODE (op
)))
6078 || (reload_completed
&& FP_REG_P (op
)))
6084 if (GET_CODE (op
) == CONST_INT
)
6090 /* Check for quick constants. */
6091 switch (get_attr_type (insn
))
6094 if (IN_RANGE (ival
, 1, 8) || IN_RANGE (ival
, -8, -1))
6095 return OP_TYPE_IMM_Q
;
6097 gcc_assert (!reload_completed
);
6101 if (USE_MOVQ (ival
))
6102 return OP_TYPE_IMM_Q
;
6104 gcc_assert (!reload_completed
);
6108 if (valid_mov3q_const (ival
))
6109 return OP_TYPE_IMM_Q
;
6111 gcc_assert (!reload_completed
);
6118 if (IN_RANGE (ival
, -0x8000, 0x7fff))
6119 return OP_TYPE_IMM_W
;
6121 return OP_TYPE_IMM_L
;
6124 if (GET_CODE (op
) == CONST_DOUBLE
)
6126 switch (GET_MODE (op
))
6129 return OP_TYPE_IMM_W
;
6133 return OP_TYPE_IMM_L
;
6140 if (GET_CODE (op
) == CONST
6141 || symbolic_operand (op
, VOIDmode
)
6144 switch (GET_MODE (op
))
6147 return OP_TYPE_IMM_Q
;
6150 return OP_TYPE_IMM_W
;
6153 return OP_TYPE_IMM_L
;
6156 if (symbolic_operand (m68k_unwrap_symbol (op
, false), VOIDmode
))
6158 return OP_TYPE_IMM_W
;
6160 return OP_TYPE_IMM_L
;
6164 gcc_assert (!reload_completed
);
6166 if (FLOAT_MODE_P (GET_MODE (op
)))
6172 /* Implement opx_type attribute.
6173 Return type of INSN's operand X.
6174 If ADDRESS_P is true, return type of memory location operand refers to. */
6176 m68k_sched_attr_opx_type (rtx_insn
*insn
, int address_p
)
6178 switch (sched_attr_op_type (insn
, true, address_p
!= 0))
6184 return OPX_TYPE_FPN
;
6187 return OPX_TYPE_MEM1
;
6189 case OP_TYPE_MEM234
:
6190 return OPX_TYPE_MEM234
;
6193 return OPX_TYPE_MEM5
;
6196 return OPX_TYPE_MEM6
;
6199 return OPX_TYPE_MEM7
;
6202 return OPX_TYPE_IMM_Q
;
6205 return OPX_TYPE_IMM_W
;
6208 return OPX_TYPE_IMM_L
;
6215 /* Implement opy_type attribute.
6216 Return type of INSN's operand Y.
6217 If ADDRESS_P is true, return type of memory location operand refers to. */
6219 m68k_sched_attr_opy_type (rtx_insn
*insn
, int address_p
)
6221 switch (sched_attr_op_type (insn
, false, address_p
!= 0))
6227 return OPY_TYPE_FPN
;
6230 return OPY_TYPE_MEM1
;
6232 case OP_TYPE_MEM234
:
6233 return OPY_TYPE_MEM234
;
6236 return OPY_TYPE_MEM5
;
6239 return OPY_TYPE_MEM6
;
6242 return OPY_TYPE_MEM7
;
6245 return OPY_TYPE_IMM_Q
;
6248 return OPY_TYPE_IMM_W
;
6251 return OPY_TYPE_IMM_L
;
6258 /* Return size of INSN as int. */
6260 sched_get_attr_size_int (rtx_insn
*insn
)
6264 switch (get_attr_type (insn
))
6267 /* There should be no references to m68k_sched_attr_size for 'ignore'
6281 switch (get_attr_opx_type (insn
))
6287 case OPX_TYPE_MEM234
:
6288 case OPY_TYPE_IMM_Q
:
6293 /* Here we assume that most absolute references are short. */
6295 case OPY_TYPE_IMM_W
:
6299 case OPY_TYPE_IMM_L
:
6307 switch (get_attr_opy_type (insn
))
6313 case OPY_TYPE_MEM234
:
6314 case OPY_TYPE_IMM_Q
:
6319 /* Here we assume that most absolute references are short. */
6321 case OPY_TYPE_IMM_W
:
6325 case OPY_TYPE_IMM_L
:
6335 gcc_assert (!reload_completed
);
6343 /* Return size of INSN as attribute enum value. */
6345 m68k_sched_attr_size (rtx_insn
*insn
)
6347 switch (sched_get_attr_size_int (insn
))
6363 /* Return operand X or Y (depending on OPX_P) of INSN,
6364 if it is a MEM, or NULL otherwise. */
6365 static enum attr_op_type
6366 sched_get_opxy_mem_type (rtx_insn
*insn
, bool opx_p
)
6370 switch (get_attr_opx_type (insn
))
6375 case OPX_TYPE_IMM_Q
:
6376 case OPX_TYPE_IMM_W
:
6377 case OPX_TYPE_IMM_L
:
6381 case OPX_TYPE_MEM234
:
6384 return OP_TYPE_MEM1
;
6387 return OP_TYPE_MEM6
;
6395 switch (get_attr_opy_type (insn
))
6400 case OPY_TYPE_IMM_Q
:
6401 case OPY_TYPE_IMM_W
:
6402 case OPY_TYPE_IMM_L
:
6406 case OPY_TYPE_MEM234
:
6409 return OP_TYPE_MEM1
;
6412 return OP_TYPE_MEM6
;
6420 /* Implement op_mem attribute. */
6422 m68k_sched_attr_op_mem (rtx_insn
*insn
)
6424 enum attr_op_type opx
;
6425 enum attr_op_type opy
;
6427 opx
= sched_get_opxy_mem_type (insn
, true);
6428 opy
= sched_get_opxy_mem_type (insn
, false);
6430 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_RN
)
6433 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM1
)
6435 switch (get_attr_opx_access (insn
))
6451 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM6
)
6453 switch (get_attr_opx_access (insn
))
6469 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_RN
)
6472 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM1
)
6474 switch (get_attr_opx_access (insn
))
6480 gcc_assert (!reload_completed
);
6485 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM6
)
6487 switch (get_attr_opx_access (insn
))
6493 gcc_assert (!reload_completed
);
6498 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_RN
)
6501 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM1
)
6503 switch (get_attr_opx_access (insn
))
6509 gcc_assert (!reload_completed
);
6514 gcc_assert (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM6
);
6515 gcc_assert (!reload_completed
);
6519 /* Data for ColdFire V4 index bypass.
6520 Producer modifies register that is used as index in consumer with
6524 /* Producer instruction. */
6527 /* Consumer instruction. */
6530 /* Scale of indexed memory access within consumer.
6531 Or zero if bypass should not be effective at the moment. */
6533 } sched_cfv4_bypass_data
;
6535 /* An empty state that is used in m68k_sched_adjust_cost. */
6536 static state_t sched_adjust_cost_state
;
6538 /* Implement adjust_cost scheduler hook.
6539 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
6541 m68k_sched_adjust_cost (rtx_insn
*insn
, int, rtx_insn
*def_insn
, int cost
,
6546 if (recog_memoized (def_insn
) < 0
6547 || recog_memoized (insn
) < 0)
6550 if (sched_cfv4_bypass_data
.scale
== 1)
6551 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
6553 /* haifa-sched.cc: insn_cost () calls bypass_p () just before
6554 targetm.sched.adjust_cost (). Hence, we can be relatively sure
6555 that the data in sched_cfv4_bypass_data is up to date. */
6556 gcc_assert (sched_cfv4_bypass_data
.pro
== def_insn
6557 && sched_cfv4_bypass_data
.con
== insn
);
6562 sched_cfv4_bypass_data
.pro
= NULL
;
6563 sched_cfv4_bypass_data
.con
= NULL
;
6564 sched_cfv4_bypass_data
.scale
= 0;
6567 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
6568 && sched_cfv4_bypass_data
.con
== NULL
6569 && sched_cfv4_bypass_data
.scale
== 0);
6571 /* Don't try to issue INSN earlier than DFA permits.
6572 This is especially useful for instructions that write to memory,
6573 as their true dependence (default) latency is better to be set to 0
6574 to workaround alias analysis limitations.
6575 This is, in fact, a machine independent tweak, so, probably,
6576 it should be moved to haifa-sched.cc: insn_cost (). */
6577 delay
= min_insn_conflict_delay (sched_adjust_cost_state
, def_insn
, insn
);
6584 /* Return maximal number of insns that can be scheduled on a single cycle. */
6586 m68k_sched_issue_rate (void)
6588 switch (m68k_sched_cpu
)
6604 /* Maximal length of instruction for current CPU.
6605 E.g. it is 3 for any ColdFire core. */
6606 static int max_insn_size
;
6608 /* Data to model instruction buffer of CPU. */
6611 /* True if instruction buffer model is modeled for current CPU. */
6614 /* Size of the instruction buffer in words. */
6617 /* Number of filled words in the instruction buffer. */
6620 /* Additional information about instruction buffer for CPUs that have
6621 a buffer of instruction records, rather than a plain buffer
6622 of instruction words. */
6623 struct _sched_ib_records
6625 /* Size of buffer in records. */
6628 /* Array to hold data on adjustments made to the size of the buffer. */
6631 /* Index of the above array. */
6635 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6639 static struct _sched_ib sched_ib
;
6641 /* ID of memory unit. */
6642 static int sched_mem_unit_code
;
6644 /* Implementation of the targetm.sched.variable_issue () hook.
6645 It is called after INSN was issued. It returns the number of insns
6646 that can possibly get scheduled on the current cycle.
6647 It is used here to determine the effect of INSN on the instruction
6650 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED
,
6651 int sched_verbose ATTRIBUTE_UNUSED
,
6652 rtx_insn
*insn
, int can_issue_more
)
6656 if (recog_memoized (insn
) >= 0 && get_attr_type (insn
) != TYPE_IGNORE
)
6658 switch (m68k_sched_cpu
)
6662 insn_size
= sched_get_attr_size_int (insn
);
6666 insn_size
= sched_get_attr_size_int (insn
);
6668 /* ColdFire V3 and V4 cores have instruction buffers that can
6669 accumulate up to 8 instructions regardless of instructions'
6670 sizes. So we should take care not to "prefetch" 24 one-word
6671 or 12 two-words instructions.
6672 To model this behavior we temporarily decrease size of the
6673 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6677 adjust
= max_insn_size
- insn_size
;
6678 sched_ib
.size
-= adjust
;
6680 if (sched_ib
.filled
> sched_ib
.size
)
6681 sched_ib
.filled
= sched_ib
.size
;
6683 sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
] = adjust
;
6686 ++sched_ib
.records
.adjust_index
;
6687 if (sched_ib
.records
.adjust_index
== sched_ib
.records
.n_insns
)
6688 sched_ib
.records
.adjust_index
= 0;
6690 /* Undo adjustment we did 7 instructions ago. */
6692 += sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
];
6697 gcc_assert (!sched_ib
.enabled_p
);
6705 if (insn_size
> sched_ib
.filled
)
6706 /* Scheduling for register pressure does not always take DFA into
6707 account. Workaround instruction buffer not being filled enough. */
6709 gcc_assert (sched_pressure
== SCHED_PRESSURE_WEIGHTED
);
6710 insn_size
= sched_ib
.filled
;
6715 else if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6716 || asm_noperands (PATTERN (insn
)) >= 0)
6717 insn_size
= sched_ib
.filled
;
6721 sched_ib
.filled
-= insn_size
;
6723 return can_issue_more
;
/* Return how many instructions should scheduler lookahead to choose the
   best one.  */
static int
m68k_sched_first_cycle_multipass_dfa_lookahead (void)
{
  /* Look at as many candidates as can still issue this cycle.  */
  return m68k_sched_issue_rate () - 1;
}
6734 /* Implementation of targetm.sched.init_global () hook.
6735 It is invoked once per scheduling pass and is used here
6736 to initialize scheduler constants. */
6738 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED
,
6739 int sched_verbose ATTRIBUTE_UNUSED
,
6740 int n_insns ATTRIBUTE_UNUSED
)
6742 /* Check that all instructions have DFA reservations and
6743 that all instructions can be issued from a clean state. */
6749 state
= alloca (state_size ());
6751 for (insn
= get_insns (); insn
!= NULL
; insn
= NEXT_INSN (insn
))
6753 if (INSN_P (insn
) && recog_memoized (insn
) >= 0)
6755 gcc_assert (insn_has_dfa_reservation_p (insn
));
6757 state_reset (state
);
6758 if (state_transition (state
, insn
) >= 0)
6764 /* Setup target cpu. */
6766 /* ColdFire V4 has a set of features to keep its instruction buffer full
6767 (e.g., a separate memory bus for instructions) and, hence, we do not model
6768 buffer for this CPU. */
6769 sched_ib
.enabled_p
= (m68k_sched_cpu
!= CPU_CFV4
);
6771 switch (m68k_sched_cpu
)
6774 sched_ib
.filled
= 0;
6781 sched_ib
.records
.n_insns
= 0;
6782 sched_ib
.records
.adjust
= NULL
;
6787 sched_ib
.records
.n_insns
= 8;
6788 sched_ib
.records
.adjust
= XNEWVEC (int, sched_ib
.records
.n_insns
);
6795 sched_mem_unit_code
= get_cpu_unit_code ("cf_mem1");
6797 sched_adjust_cost_state
= xmalloc (state_size ());
6798 state_reset (sched_adjust_cost_state
);
6801 emit_insn (gen_ib ());
6802 sched_ib
.insn
= get_insns ();
6806 /* Scheduling pass is now finished. Free/reset static variables. */
6808 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED
,
6809 int verbose ATTRIBUTE_UNUSED
)
6811 sched_ib
.insn
= NULL
;
6813 free (sched_adjust_cost_state
);
6814 sched_adjust_cost_state
= NULL
;
6816 sched_mem_unit_code
= 0;
6818 free (sched_ib
.records
.adjust
);
6819 sched_ib
.records
.adjust
= NULL
;
6820 sched_ib
.records
.n_insns
= 0;
6824 /* Implementation of targetm.sched.init () hook.
6825 It is invoked each time scheduler starts on the new block (basic block or
6826 extended basic block). */
6828 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED
,
6829 int sched_verbose ATTRIBUTE_UNUSED
,
6830 int n_insns ATTRIBUTE_UNUSED
)
6832 switch (m68k_sched_cpu
)
6840 sched_ib
.size
= sched_ib
.records
.n_insns
* max_insn_size
;
6842 memset (sched_ib
.records
.adjust
, 0,
6843 sched_ib
.records
.n_insns
* sizeof (*sched_ib
.records
.adjust
));
6844 sched_ib
.records
.adjust_index
= 0;
6848 gcc_assert (!sched_ib
.enabled_p
);
6856 if (sched_ib
.enabled_p
)
6857 /* haifa-sched.cc: schedule_block () calls advance_cycle () just before
6858 the first cycle. Workaround that. */
6859 sched_ib
.filled
= -2;
6862 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6863 It is invoked just before current cycle finishes and is used here
6864 to track if instruction buffer got its two words this cycle. */
6866 m68k_sched_dfa_pre_advance_cycle (void)
6868 if (!sched_ib
.enabled_p
)
6871 if (!cpu_unit_reservation_p (curr_state
, sched_mem_unit_code
))
6873 sched_ib
.filled
+= 2;
6875 if (sched_ib
.filled
> sched_ib
.size
)
6876 sched_ib
.filled
= sched_ib
.size
;
6880 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6881 It is invoked just after new cycle begins and is used here
6882 to setup number of filled words in the instruction buffer so that
6883 instructions which won't have all their words prefetched would be
6884 stalled for a cycle. */
6886 m68k_sched_dfa_post_advance_cycle (void)
6890 if (!sched_ib
.enabled_p
)
6893 /* Setup number of prefetched instruction words in the instruction
6895 i
= max_insn_size
- sched_ib
.filled
;
6899 if (state_transition (curr_state
, sched_ib
.insn
) >= 0)
6900 /* Pick up scheduler state. */
6905 /* Return X or Y (depending on OPX_P) operand of INSN,
6906 if it is an integer register, or NULL overwise. */
6908 sched_get_reg_operand (rtx_insn
*insn
, bool opx_p
)
6914 if (get_attr_opx_type (insn
) == OPX_TYPE_RN
)
6916 op
= sched_get_operand (insn
, true);
6917 gcc_assert (op
!= NULL
);
6919 if (!reload_completed
&& !REG_P (op
))
6925 if (get_attr_opy_type (insn
) == OPY_TYPE_RN
)
6927 op
= sched_get_operand (insn
, false);
6928 gcc_assert (op
!= NULL
);
6930 if (!reload_completed
&& !REG_P (op
))
6938 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6941 sched_mem_operand_p (rtx_insn
*insn
, bool opx_p
)
6943 switch (sched_get_opxy_mem_type (insn
, opx_p
))
6954 /* Return X or Y (depending on OPX_P) operand of INSN,
6955 if it is a MEM, or NULL overwise. */
6957 sched_get_mem_operand (rtx_insn
*insn
, bool must_read_p
, bool must_write_p
)
6977 if (opy_p
&& sched_mem_operand_p (insn
, false))
6978 return sched_get_operand (insn
, false);
6980 if (opx_p
&& sched_mem_operand_p (insn
, true))
6981 return sched_get_operand (insn
, true);
6987 /* Return non-zero if PRO modifies register used as part of
6990 m68k_sched_address_bypass_p (rtx_insn
*pro
, rtx_insn
*con
)
6995 pro_x
= sched_get_reg_operand (pro
, true);
6999 con_mem_read
= sched_get_mem_operand (con
, true, false);
7000 gcc_assert (con_mem_read
!= NULL
);
7002 if (reg_mentioned_p (pro_x
, con_mem_read
))
7008 /* Helper function for m68k_sched_indexed_address_bypass_p.
7009 if PRO modifies register used as index in CON,
7010 return scale of indexed memory access in CON. Return zero overwise. */
7012 sched_get_indexed_address_scale (rtx_insn
*pro
, rtx_insn
*con
)
7016 struct m68k_address address
;
7018 reg
= sched_get_reg_operand (pro
, true);
7022 mem
= sched_get_mem_operand (con
, true, false);
7023 gcc_assert (mem
!= NULL
&& MEM_P (mem
));
7025 if (!m68k_decompose_address (GET_MODE (mem
), XEXP (mem
, 0), reload_completed
,
7029 if (REGNO (reg
) == REGNO (address
.index
))
7031 gcc_assert (address
.scale
!= 0);
7032 return address
.scale
;
7038 /* Return non-zero if PRO modifies register used
7039 as index with scale 2 or 4 in CON. */
7041 m68k_sched_indexed_address_bypass_p (rtx_insn
*pro
, rtx_insn
*con
)
7043 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
7044 && sched_cfv4_bypass_data
.con
== NULL
7045 && sched_cfv4_bypass_data
.scale
== 0);
7047 switch (sched_get_indexed_address_scale (pro
, con
))
7050 /* We can't have a variable latency bypass, so
7051 remember to adjust the insn cost in adjust_cost hook. */
7052 sched_cfv4_bypass_data
.pro
= pro
;
7053 sched_cfv4_bypass_data
.con
= con
;
7054 sched_cfv4_bypass_data
.scale
= 1;
7066 /* We generate a two-instructions program at M_TRAMP :
7067 movea.l &CHAIN_VALUE,%a0
7069 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
7072 m68k_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
7074 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
7077 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM
));
7079 mem
= adjust_address (m_tramp
, HImode
, 0);
7080 emit_move_insn (mem
, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM
-8) << 9)));
7081 mem
= adjust_address (m_tramp
, SImode
, 2);
7082 emit_move_insn (mem
, chain_value
);
7084 mem
= adjust_address (m_tramp
, HImode
, 6);
7085 emit_move_insn (mem
, GEN_INT(0x4EF9));
7086 mem
= adjust_address (m_tramp
, SImode
, 8);
7087 emit_move_insn (mem
, fnaddr
);
7089 FINALIZE_TRAMPOLINE (XEXP (m_tramp
, 0));
7092 /* On the 68000, the RTS insn cannot pop anything.
7093 On the 68010, the RTD insn may be used to pop them if the number
7094 of args is fixed, but if the number is variable then the caller
7095 must pop them all. RTD can't be used for library calls now
7096 because the library is compiled with the Unix compiler.
7097 Use of RTD is a selectable option, since it is incompatible with
7098 standard Unix calling sequences. If the option is not selected,
7099 the caller must always pop the args. */
7102 m68k_return_pops_args (tree fundecl
, tree funtype
, poly_int64 size
)
7106 || TREE_CODE (fundecl
) != IDENTIFIER_NODE
)
7107 && (!stdarg_p (funtype
)))
7108 ? (HOST_WIDE_INT
) size
: 0);
7111 /* Make sure everything's fine if we *don't* have a given processor.
7112 This assumes that putting a register in fixed_regs will keep the
7113 compiler's mitts completely off it. We don't bother to zero it out
7114 of register classes. */
7117 m68k_conditional_register_usage (void)
7121 if (!TARGET_HARD_FLOAT
)
7123 x
= reg_class_contents
[FP_REGS
];
7124 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
7125 if (TEST_HARD_REG_BIT (x
, i
))
7126 fixed_regs
[i
] = call_used_regs
[i
] = 1;
7129 fixed_regs
[PIC_REG
] = call_used_regs
[PIC_REG
] = 1;
7133 m68k_init_sync_libfuncs (void)
7135 init_sync_libfuncs (UNITS_PER_WORD
);
7138 /* Implements EPILOGUE_USES. All registers are live on exit from an
7139 interrupt routine. */
7141 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
7143 return (reload_completed
7144 && (m68k_get_function_kind (current_function_decl
)
7145 == m68k_fk_interrupt_handler
));
7149 /* Implement TARGET_C_EXCESS_PRECISION.
7151 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
7152 instructions, we get proper intermediate rounding, otherwise we
7153 get extended precision results. */
7155 static enum flt_eval_method
7156 m68k_excess_precision (enum excess_precision_type type
)
7160 case EXCESS_PRECISION_TYPE_FAST
:
7161 /* The fastest type to promote to will always be the native type,
7162 whether that occurs with implicit excess precision or
7164 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
7165 case EXCESS_PRECISION_TYPE_STANDARD
:
7166 case EXCESS_PRECISION_TYPE_IMPLICIT
:
7167 /* Otherwise, the excess precision we want when we are
7168 in a standards compliant mode, and the implicit precision we
7169 provide can be identical. */
7170 if (TARGET_68040
|| ! TARGET_68881
)
7171 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
7173 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE
;
7174 case EXCESS_PRECISION_TYPE_FLOAT16
:
7175 error ("%<-fexcess-precision=16%> is not supported on this target");
7180 return FLT_EVAL_METHOD_UNPREDICTABLE
;
7183 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
7184 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
7187 m68k_push_rounding (poly_int64 bytes
)
7189 if (TARGET_COLDFIRE
)
7191 return (bytes
+ 1) & ~1;
7194 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
7197 m68k_promote_function_mode (const_tree type
, machine_mode mode
,
7198 int *punsignedp ATTRIBUTE_UNUSED
,
7199 const_tree fntype ATTRIBUTE_UNUSED
,
7202 /* Promote libcall arguments narrower than int to match the normal C
7203 ABI (for which promotions are handled via
7204 TARGET_PROMOTE_PROTOTYPES). */
7205 if (type
== NULL_TREE
&& !for_return
&& (mode
== QImode
|| mode
== HImode
))
7210 /* Implement TARGET_ZERO_CALL_USED_REGS. */
7213 m68k_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs
)
7215 rtx zero_fpreg
= NULL_RTX
;
7217 for (unsigned int regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
7218 if (TEST_HARD_REG_BIT (need_zeroed_hardregs
, regno
))
7222 if (INT_REGNO_P (regno
))
7224 reg
= regno_reg_rtx
[regno
];
7225 zero
= CONST0_RTX (SImode
);
7227 else if (FP_REGNO_P (regno
))
7229 reg
= gen_raw_REG (SFmode
, regno
);
7230 if (zero_fpreg
== NULL_RTX
)
7232 /* On the 040/060 clearing an FP reg loads a large
7233 immediate. To reduce code size use the first
7234 cleared FP reg to clear remaining ones. Don't do
7235 this on cores which use fmovecr. */
7236 zero
= CONST0_RTX (SFmode
);
7246 emit_move_insn (reg
, zero
);
7249 return need_zeroed_hardregs
;
7252 /* Implement TARGET_C_MODE_FOR_FLOATING_TYPE. Return XFmode or DFmode
7253 for TI_LONG_DOUBLE_TYPE which is for long double type, go with the
7254 default one for the others. */
7257 m68k_c_mode_for_floating_type (enum tree_index ti
)
7259 if (ti
== TI_LONG_DOUBLE_TYPE
)
7260 return LONG_DOUBLE_TYPE_MODE
;
7261 return default_mode_for_floating_type (ti
);
7264 /* Implement TARGET_LRA_P. */
7272 #include "gt-m68k.h"