/* Subroutines for insn-output.cc for Motorola 68000 family.
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
20 #define IN_TARGET_CODE 1
23 #define INCLUDE_STRING
25 #include "coretypes.h"
29 #include "stringpool.h"
34 #include "fold-const.h"
36 #include "stor-layout.h"
39 #include "insn-config.h"
40 #include "conditions.h"
42 #include "insn-attr.h"
44 #include "diagnostic-core.h"
61 #include "cfgcleanup.h"
62 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
63 #include "sched-int.h"
64 #include "insn-codes.h"
71 /* This file should be included last. */
72 #include "target-def.h"
74 enum reg_class regno_reg_class
[] =
76 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
77 DATA_REGS
, DATA_REGS
, DATA_REGS
, DATA_REGS
,
78 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
79 ADDR_REGS
, ADDR_REGS
, ADDR_REGS
, ADDR_REGS
,
80 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
81 FP_REGS
, FP_REGS
, FP_REGS
, FP_REGS
,
86 /* The minimum number of integer registers that we want to save with the
87 movem instruction. Using two movel instructions instead of a single
88 moveml is about 15% faster for the 68020 and 68030 at no expense in
90 #define MIN_MOVEM_REGS 3
92 /* The minimum number of floating point registers that we want to save
93 with the fmovem instruction. */
94 #define MIN_FMOVEM_REGS 1
96 /* Structure describing stack frame layout. */
99 /* Stack pointer to frame pointer offset. */
100 HOST_WIDE_INT offset
;
102 /* Offset of FPU registers. */
103 HOST_WIDE_INT foffset
;
105 /* Frame size in bytes (rounded up). */
108 /* Data and address register. */
110 unsigned int reg_mask
;
114 unsigned int fpu_mask
;
116 /* Offsets relative to ARG_POINTER. */
117 HOST_WIDE_INT frame_pointer_offset
;
118 HOST_WIDE_INT stack_pointer_offset
;
120 /* Function which the above information refers to. */
124 /* Current frame information calculated by m68k_compute_frame_layout(). */
125 static struct m68k_frame current_frame
;
127 /* Structure describing an m68k address.
129 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
130 with null fields evaluating to 0. Here:
132 - BASE satisfies m68k_legitimate_base_reg_p
133 - INDEX satisfies m68k_legitimate_index_reg_p
134 - OFFSET satisfies m68k_legitimate_constant_address_p
136 INDEX is either HImode or SImode. The other fields are SImode.
138 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
139 the address is (BASE)+. */
140 struct m68k_address
{
148 static int m68k_sched_adjust_cost (rtx_insn
*, int, rtx_insn
*, int,
150 static int m68k_sched_issue_rate (void);
151 static int m68k_sched_variable_issue (FILE *, int, rtx_insn
*, int);
152 static void m68k_sched_md_init_global (FILE *, int, int);
153 static void m68k_sched_md_finish_global (FILE *, int);
154 static void m68k_sched_md_init (FILE *, int, int);
155 static void m68k_sched_dfa_pre_advance_cycle (void);
156 static void m68k_sched_dfa_post_advance_cycle (void);
157 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
159 static bool m68k_can_eliminate (const int, const int);
160 static void m68k_conditional_register_usage (void);
161 static bool m68k_legitimate_address_p (machine_mode
, rtx
, bool,
162 code_helper
= ERROR_MARK
);
163 static void m68k_option_override (void);
164 static void m68k_override_options_after_change (void);
165 static rtx
find_addr_reg (rtx
);
166 static const char *singlemove_string (rtx
*);
167 static void m68k_output_mi_thunk (FILE *, tree
, HOST_WIDE_INT
,
168 HOST_WIDE_INT
, tree
);
169 static rtx
m68k_struct_value_rtx (tree
, int);
170 static tree
m68k_handle_fndecl_attribute (tree
*node
, tree name
,
171 tree args
, int flags
,
173 static void m68k_compute_frame_layout (void);
174 static bool m68k_save_reg (unsigned int regno
, bool interrupt_handler
);
175 static bool m68k_ok_for_sibcall_p (tree
, tree
);
176 static bool m68k_tls_symbol_p (rtx
);
177 static rtx
m68k_legitimize_address (rtx
, rtx
, machine_mode
);
178 static bool m68k_rtx_costs (rtx
, machine_mode
, int, int, int *, bool);
179 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
180 static bool m68k_return_in_memory (const_tree
, const_tree
);
182 static void m68k_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
183 static void m68k_trampoline_init (rtx
, tree
, rtx
);
184 static poly_int64
m68k_return_pops_args (tree
, tree
, poly_int64
);
185 static rtx
m68k_delegitimize_address (rtx
);
186 static void m68k_function_arg_advance (cumulative_args_t
,
187 const function_arg_info
&);
188 static rtx
m68k_function_arg (cumulative_args_t
, const function_arg_info
&);
189 static bool m68k_cannot_force_const_mem (machine_mode mode
, rtx x
);
190 static bool m68k_output_addr_const_extra (FILE *, rtx
);
191 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED
;
192 static enum flt_eval_method
193 m68k_excess_precision (enum excess_precision_type
);
194 static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode
);
195 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode
);
196 static bool m68k_modes_tieable_p (machine_mode
, machine_mode
);
197 static machine_mode
m68k_promote_function_mode (const_tree
, machine_mode
,
198 int *, const_tree
, int);
199 static void m68k_asm_final_postscan_insn (FILE *, rtx_insn
*insn
, rtx
[], int);
200 static HARD_REG_SET
m68k_zero_call_used_regs (HARD_REG_SET
);
201 static machine_mode
m68k_c_mode_for_floating_type (enum tree_index
);
202 static bool m68k_use_lra_p (void);
204 /* Initialize the GCC target structure. */
206 #if INT_OP_GROUP == INT_OP_DOT_WORD
207 #undef TARGET_ASM_ALIGNED_HI_OP
208 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
211 #if INT_OP_GROUP == INT_OP_NO_DOT
212 #undef TARGET_ASM_BYTE_OP
213 #define TARGET_ASM_BYTE_OP "\tbyte\t"
214 #undef TARGET_ASM_ALIGNED_HI_OP
215 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
216 #undef TARGET_ASM_ALIGNED_SI_OP
217 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
220 #if INT_OP_GROUP == INT_OP_DC
221 #undef TARGET_ASM_BYTE_OP
222 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
223 #undef TARGET_ASM_ALIGNED_HI_OP
224 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
225 #undef TARGET_ASM_ALIGNED_SI_OP
226 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
229 #undef TARGET_ASM_UNALIGNED_HI_OP
230 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
231 #undef TARGET_ASM_UNALIGNED_SI_OP
232 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
234 #undef TARGET_ASM_OUTPUT_MI_THUNK
235 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
236 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
237 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
239 #undef TARGET_ASM_FILE_START_APP_OFF
240 #define TARGET_ASM_FILE_START_APP_OFF true
242 #undef TARGET_LEGITIMIZE_ADDRESS
243 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
245 #undef TARGET_SCHED_ADJUST_COST
246 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
248 #undef TARGET_SCHED_ISSUE_RATE
249 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
251 #undef TARGET_SCHED_VARIABLE_ISSUE
252 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
254 #undef TARGET_SCHED_INIT_GLOBAL
255 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
257 #undef TARGET_SCHED_FINISH_GLOBAL
258 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
260 #undef TARGET_SCHED_INIT
261 #define TARGET_SCHED_INIT m68k_sched_md_init
263 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
264 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
266 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
267 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
269 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
270 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
271 m68k_sched_first_cycle_multipass_dfa_lookahead
273 #undef TARGET_OPTION_OVERRIDE
274 #define TARGET_OPTION_OVERRIDE m68k_option_override
276 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
277 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
279 #undef TARGET_RTX_COSTS
280 #define TARGET_RTX_COSTS m68k_rtx_costs
282 #undef TARGET_ATTRIBUTE_TABLE
283 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
285 #undef TARGET_PROMOTE_PROTOTYPES
286 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
288 #undef TARGET_STRUCT_VALUE_RTX
289 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
291 #undef TARGET_CANNOT_FORCE_CONST_MEM
292 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
294 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
295 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
297 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
298 #undef TARGET_RETURN_IN_MEMORY
299 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
303 #undef TARGET_HAVE_TLS
304 #define TARGET_HAVE_TLS (true)
306 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
307 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
311 #define TARGET_LRA_P m68k_use_lra_p
313 #undef TARGET_LEGITIMATE_ADDRESS_P
314 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
316 #undef TARGET_CAN_ELIMINATE
317 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
319 #undef TARGET_CONDITIONAL_REGISTER_USAGE
320 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
322 #undef TARGET_TRAMPOLINE_INIT
323 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
325 #undef TARGET_RETURN_POPS_ARGS
326 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
328 #undef TARGET_DELEGITIMIZE_ADDRESS
329 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
331 #undef TARGET_FUNCTION_ARG
332 #define TARGET_FUNCTION_ARG m68k_function_arg
334 #undef TARGET_FUNCTION_ARG_ADVANCE
335 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
337 #undef TARGET_LEGITIMATE_CONSTANT_P
338 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
340 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
341 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
343 #undef TARGET_C_EXCESS_PRECISION
344 #define TARGET_C_EXCESS_PRECISION m68k_excess_precision
346 /* The value stored by TAS. */
347 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
348 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
350 #undef TARGET_HARD_REGNO_NREGS
351 #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
352 #undef TARGET_HARD_REGNO_MODE_OK
353 #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
355 #undef TARGET_MODES_TIEABLE_P
356 #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
358 #undef TARGET_PROMOTE_FUNCTION_MODE
359 #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode
361 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
362 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
364 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
365 #define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn
367 #undef TARGET_ZERO_CALL_USED_REGS
368 #define TARGET_ZERO_CALL_USED_REGS m68k_zero_call_used_regs
370 #undef TARGET_C_MODE_FOR_FLOATING_TYPE
371 #define TARGET_C_MODE_FOR_FLOATING_TYPE m68k_c_mode_for_floating_type
373 TARGET_GNU_ATTRIBUTES (m68k_attribute_table
,
375 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
376 affects_type_identity, handler, exclude } */
377 { "interrupt", 0, 0, true, false, false, false,
378 m68k_handle_fndecl_attribute
, NULL
},
379 { "interrupt_handler", 0, 0, true, false, false, false,
380 m68k_handle_fndecl_attribute
, NULL
},
381 { "interrupt_thread", 0, 0, true, false, false, false,
382 m68k_handle_fndecl_attribute
, NULL
}
385 struct gcc_target targetm
= TARGET_INITIALIZER
;
387 /* Base flags for 68k ISAs. */
388 #define FL_FOR_isa_00 FL_ISA_68000
389 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
390 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
391 generated 68881 code for 68020 and 68030 targets unless explicitly told
393 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
394 | FL_BITFIELD | FL_68881 | FL_CAS)
395 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
396 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
398 /* Base flags for ColdFire ISAs. */
399 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
400 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
401 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
402 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
403 /* ISA_C is not upwardly compatible with ISA_B. */
404 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
408 /* Traditional 68000 instruction sets. */
414 /* ColdFire instruction set variants. */
422 /* Information about one of the -march, -mcpu or -mtune arguments. */
423 struct m68k_target_selection
425 /* The argument being described. */
428 /* For -mcpu, this is the device selected by the option.
429 For -mtune and -march, it is a representative device
430 for the microarchitecture or ISA respectively. */
431 enum target_device device
;
433 /* The M68K_DEVICE fields associated with DEVICE. See the comment
434 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
436 enum uarch_type microarch
;
441 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
442 static const struct m68k_target_selection all_devices
[] =
444 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
445 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
446 #include "m68k-devices.def"
448 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
451 /* A list of all ISAs, mapping each one to a representative device.
452 Used for -march selection. */
453 static const struct m68k_target_selection all_isas
[] =
455 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
456 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
457 #include "m68k-isas.def"
459 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
462 /* A list of all microarchitectures, mapping each one to a representative
463 device. Used for -mtune selection. */
464 static const struct m68k_target_selection all_microarchs
[] =
466 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
467 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
468 #include "m68k-microarchs.def"
469 #undef M68K_MICROARCH
470 { NULL
, unk_device
, NULL
, unk_arch
, isa_max
, 0 }
473 /* The entries associated with the -mcpu, -march and -mtune settings,
474 or null for options that have not been used. */
475 const struct m68k_target_selection
*m68k_cpu_entry
;
476 const struct m68k_target_selection
*m68k_arch_entry
;
477 const struct m68k_target_selection
*m68k_tune_entry
;
479 /* Which CPU we are generating code for. */
480 enum target_device m68k_cpu
;
482 /* Which microarchitecture to tune for. */
483 enum uarch_type m68k_tune
;
485 /* Which FPU to use. */
486 enum fpu_type m68k_fpu
;
488 /* The set of FL_* flags that apply to the target processor. */
489 unsigned int m68k_cpu_flags
;
491 /* The set of FL_* flags that apply to the processor to be tuned for. */
492 unsigned int m68k_tune_flags
;
494 /* Asm templates for calling or jumping to an arbitrary symbolic address,
495 or NULL if such calls or jumps are not supported. The address is held
497 const char *m68k_symbolic_call
;
498 const char *m68k_symbolic_jump
;
500 /* Enum variable that corresponds to m68k_symbolic_call values. */
501 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var
;
504 /* Implement TARGET_OPTION_OVERRIDE. */
507 m68k_option_override (void)
509 const struct m68k_target_selection
*entry
;
510 unsigned long target_mask
;
512 if (OPTION_SET_P (m68k_arch_option
))
513 m68k_arch_entry
= &all_isas
[m68k_arch_option
];
515 if (OPTION_SET_P (m68k_cpu_option
))
516 m68k_cpu_entry
= &all_devices
[(int) m68k_cpu_option
];
518 if (OPTION_SET_P (m68k_tune_option
))
519 m68k_tune_entry
= &all_microarchs
[(int) m68k_tune_option
];
527 -march=ARCH should generate code that runs any processor
528 implementing architecture ARCH. -mcpu=CPU should override -march
529 and should generate code that runs on processor CPU, making free
530 use of any instructions that CPU understands. -mtune=UARCH applies
531 on top of -mcpu or -march and optimizes the code for UARCH. It does
532 not change the target architecture. */
535 /* Complain if the -march setting is for a different microarchitecture,
536 or includes flags that the -mcpu setting doesn't. */
538 && (m68k_arch_entry
->microarch
!= m68k_cpu_entry
->microarch
539 || (m68k_arch_entry
->flags
& ~m68k_cpu_entry
->flags
) != 0))
540 warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
541 m68k_cpu_entry
->name
, m68k_arch_entry
->name
);
543 entry
= m68k_cpu_entry
;
546 entry
= m68k_arch_entry
;
549 entry
= all_devices
+ TARGET_CPU_DEFAULT
;
551 m68k_cpu_flags
= entry
->flags
;
553 /* Use the architecture setting to derive default values for
557 /* ColdFire is lenient about alignment. */
558 if (!TARGET_COLDFIRE
)
559 target_mask
|= MASK_STRICT_ALIGNMENT
;
561 if ((m68k_cpu_flags
& FL_BITFIELD
) != 0)
562 target_mask
|= MASK_BITFIELD
;
563 if ((m68k_cpu_flags
& FL_CF_HWDIV
) != 0)
564 target_mask
|= MASK_CF_HWDIV
;
565 if ((m68k_cpu_flags
& (FL_68881
| FL_CF_FPU
)) != 0)
566 target_mask
|= MASK_HARD_FLOAT
;
567 target_flags
|= target_mask
& ~target_flags_explicit
;
569 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
570 m68k_cpu
= entry
->device
;
573 m68k_tune
= m68k_tune_entry
->microarch
;
574 m68k_tune_flags
= m68k_tune_entry
->flags
;
576 #ifdef M68K_DEFAULT_TUNE
577 else if (!m68k_cpu_entry
&& !m68k_arch_entry
)
579 enum target_device dev
;
580 dev
= all_microarchs
[M68K_DEFAULT_TUNE
].device
;
581 m68k_tune_flags
= all_devices
[dev
].flags
;
586 m68k_tune
= entry
->microarch
;
587 m68k_tune_flags
= entry
->flags
;
590 /* Set the type of FPU. */
591 m68k_fpu
= (!TARGET_HARD_FLOAT
? FPUTYPE_NONE
592 : (m68k_cpu_flags
& FL_COLDFIRE
) != 0 ? FPUTYPE_COLDFIRE
595 /* Sanity check to ensure that msep-data and mid-sahred-library are not
596 * both specified together. Doing so simply doesn't make sense.
598 if (TARGET_SEP_DATA
&& TARGET_ID_SHARED_LIBRARY
)
599 error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");
601 /* If we're generating code for a separate A5 relative data segment,
602 * we've got to enable -fPIC as well. This might be relaxable to
603 * -fpic but it hasn't been tested properly.
605 if (TARGET_SEP_DATA
|| TARGET_ID_SHARED_LIBRARY
)
608 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
609 error if the target does not support them. */
610 if (TARGET_PCREL
&& !TARGET_68020
&& flag_pic
== 2)
611 error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu");
613 /* ??? A historic way of turning on pic, or is this intended to
614 be an embedded thing that doesn't have the same name binding
615 significance that it does on hosted ELF systems? */
616 if (TARGET_PCREL
&& flag_pic
== 0)
621 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_JSR
;
623 m68k_symbolic_jump
= "jra %a0";
625 else if (TARGET_ID_SHARED_LIBRARY
)
626 /* All addresses must be loaded from the GOT. */
628 else if (TARGET_68020
|| TARGET_ISAB
|| TARGET_ISAC
)
631 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_C
;
633 m68k_symbolic_call_var
= M68K_SYMBOLIC_CALL_BSR_P
;
636 /* No unconditional long branch */;
637 else if (TARGET_PCREL
)
638 m68k_symbolic_jump
= "bra%.l %c0";
640 m68k_symbolic_jump
= "bra%.l %p0";
641 /* Turn off function cse if we are doing PIC. We always want
642 function call to be done as `bsr foo@PLTPC'. */
643 /* ??? It's traditional to do this for -mpcrel too, but it isn't
644 clear how intentional that is. */
645 flag_no_function_cse
= 1;
648 switch (m68k_symbolic_call_var
)
650 case M68K_SYMBOLIC_CALL_JSR
:
651 m68k_symbolic_call
= "jsr %a0";
654 case M68K_SYMBOLIC_CALL_BSR_C
:
655 m68k_symbolic_call
= "bsr%.l %c0";
658 case M68K_SYMBOLIC_CALL_BSR_P
:
659 m68k_symbolic_call
= "bsr%.l %p0";
662 case M68K_SYMBOLIC_CALL_NONE
:
663 gcc_assert (m68k_symbolic_call
== NULL
);
670 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
671 parse_alignment_opts ();
672 int label_alignment
= align_labels
.levels
[0].get_value ();
673 if (label_alignment
> 2)
675 warning (0, "%<-falign-labels=%d%> is not supported", label_alignment
);
676 str_align_labels
= "1";
679 int loop_alignment
= align_loops
.levels
[0].get_value ();
680 if (loop_alignment
> 2)
682 warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment
);
683 str_align_loops
= "1";
687 if ((opt_fstack_limit_symbol_arg
!= NULL
|| opt_fstack_limit_register_no
>= 0)
690 warning (0, "%<-fstack-limit-%> options are not supported on this cpu");
691 opt_fstack_limit_symbol_arg
= NULL
;
692 opt_fstack_limit_register_no
= -1;
695 SUBTARGET_OVERRIDE_OPTIONS
;
697 /* Setup scheduling options. */
699 m68k_sched_cpu
= CPU_CFV1
;
701 m68k_sched_cpu
= CPU_CFV2
;
703 m68k_sched_cpu
= CPU_CFV3
;
705 m68k_sched_cpu
= CPU_CFV4
;
708 m68k_sched_cpu
= CPU_UNKNOWN
;
709 flag_schedule_insns
= 0;
710 flag_schedule_insns_after_reload
= 0;
711 flag_modulo_sched
= 0;
712 flag_live_range_shrinkage
= 0;
715 if (m68k_sched_cpu
!= CPU_UNKNOWN
)
717 if ((m68k_cpu_flags
& (FL_CF_EMAC
| FL_CF_EMAC_B
)) != 0)
718 m68k_sched_mac
= MAC_CF_EMAC
;
719 else if ((m68k_cpu_flags
& FL_CF_MAC
) != 0)
720 m68k_sched_mac
= MAC_CF_MAC
;
722 m68k_sched_mac
= MAC_NO
;
726 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
729 m68k_override_options_after_change (void)
731 if (m68k_sched_cpu
== CPU_UNKNOWN
)
733 flag_schedule_insns
= 0;
734 flag_schedule_insns_after_reload
= 0;
735 flag_modulo_sched
= 0;
736 flag_live_range_shrinkage
= 0;
740 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
741 given argument and NAME is the argument passed to -mcpu. Return NULL
742 if -mcpu was not passed. */
745 m68k_cpp_cpu_ident (const char *prefix
)
749 return concat ("__m", prefix
, "_cpu_", m68k_cpu_entry
->name
, NULL
);
752 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
753 given argument and NAME is the name of the representative device for
754 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
757 m68k_cpp_cpu_family (const char *prefix
)
761 return concat ("__m", prefix
, "_family_", m68k_cpu_entry
->family
, NULL
);
764 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
765 "interrupt_handler" attribute and interrupt_thread if FUNC has an
766 "interrupt_thread" attribute. Otherwise, return
767 m68k_fk_normal_function. */
769 enum m68k_function_kind
770 m68k_get_function_kind (tree func
)
774 gcc_assert (TREE_CODE (func
) == FUNCTION_DECL
);
776 a
= lookup_attribute ("interrupt", DECL_ATTRIBUTES (func
));
778 return m68k_fk_interrupt_handler
;
780 a
= lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func
));
782 return m68k_fk_interrupt_handler
;
784 a
= lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func
));
786 return m68k_fk_interrupt_thread
;
788 return m68k_fk_normal_function
;
791 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
792 struct attribute_spec.handler. */
794 m68k_handle_fndecl_attribute (tree
*node
, tree name
,
795 tree args ATTRIBUTE_UNUSED
,
796 int flags ATTRIBUTE_UNUSED
,
799 if (TREE_CODE (*node
) != FUNCTION_DECL
)
801 warning (OPT_Wattributes
, "%qE attribute only applies to functions",
803 *no_add_attrs
= true;
806 if (m68k_get_function_kind (*node
) != m68k_fk_normal_function
)
808 error ("multiple interrupt attributes not allowed");
809 *no_add_attrs
= true;
813 && !strcmp (IDENTIFIER_POINTER (name
), "interrupt_thread"))
815 error ("%<interrupt_thread%> is available only on fido");
816 *no_add_attrs
= true;
823 m68k_compute_frame_layout (void)
827 enum m68k_function_kind func_kind
=
828 m68k_get_function_kind (current_function_decl
);
829 bool interrupt_handler
= func_kind
== m68k_fk_interrupt_handler
;
830 bool interrupt_thread
= func_kind
== m68k_fk_interrupt_thread
;
832 /* Only compute the frame once per function.
833 Don't cache information until reload has been completed. */
834 if (current_frame
.funcdef_no
== current_function_funcdef_no
838 current_frame
.size
= (get_frame_size () + 3) & -4;
842 /* Interrupt thread does not need to save any register. */
843 if (!interrupt_thread
)
844 for (regno
= 0; regno
< 16; regno
++)
845 if (m68k_save_reg (regno
, interrupt_handler
))
847 mask
|= 1 << (regno
- D0_REG
);
850 current_frame
.offset
= saved
* 4;
851 current_frame
.reg_no
= saved
;
852 current_frame
.reg_mask
= mask
;
854 current_frame
.foffset
= 0;
856 if (TARGET_HARD_FLOAT
)
858 /* Interrupt thread does not need to save any register. */
859 if (!interrupt_thread
)
860 for (regno
= 16; regno
< 24; regno
++)
861 if (m68k_save_reg (regno
, interrupt_handler
))
863 mask
|= 1 << (regno
- FP0_REG
);
866 current_frame
.foffset
= saved
* TARGET_FP_REG_SIZE
;
867 current_frame
.offset
+= current_frame
.foffset
;
869 current_frame
.fpu_no
= saved
;
870 current_frame
.fpu_mask
= mask
;
872 /* Remember what function this frame refers to. */
873 current_frame
.funcdef_no
= current_function_funcdef_no
;
876 /* Worker function for TARGET_CAN_ELIMINATE. */
879 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED
, const int to
)
881 return (to
== STACK_POINTER_REGNUM
? ! frame_pointer_needed
: true);
885 m68k_initial_elimination_offset (int from
, int to
)
888 /* The arg pointer points 8 bytes before the start of the arguments,
889 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
890 frame pointer in most frames. */
891 argptr_offset
= frame_pointer_needed
? 0 : UNITS_PER_WORD
;
892 if (from
== ARG_POINTER_REGNUM
&& to
== FRAME_POINTER_REGNUM
)
893 return argptr_offset
;
895 m68k_compute_frame_layout ();
897 gcc_assert (to
== STACK_POINTER_REGNUM
);
900 case ARG_POINTER_REGNUM
:
901 return current_frame
.offset
+ current_frame
.size
- argptr_offset
;
902 case FRAME_POINTER_REGNUM
:
903 return current_frame
.offset
+ current_frame
.size
;
909 /* Refer to the array `regs_ever_live' to determine which registers
910 to save; `regs_ever_live[I]' is nonzero if register number I
911 is ever used in the function. This function is responsible for
912 knowing which registers should not be saved even if used.
913 Return true if we need to save REGNO. */
916 m68k_save_reg (unsigned int regno
, bool interrupt_handler
)
918 if (flag_pic
&& regno
== PIC_REG
)
920 if (crtl
->saves_all_registers
)
922 if (crtl
->uses_pic_offset_table
)
924 /* Reload may introduce constant pool references into a function
925 that thitherto didn't need a PIC register. Note that the test
926 above will not catch that case because we will only set
927 crtl->uses_pic_offset_table when emitting
928 the address reloads. */
929 if (crtl
->uses_const_pool
)
933 if (crtl
->calls_eh_return
)
938 unsigned int test
= EH_RETURN_DATA_REGNO (i
);
939 if (test
== INVALID_REGNUM
)
946 /* Fixed regs we never touch. */
947 if (fixed_regs
[regno
])
950 /* The frame pointer (if it is such) is handled specially. */
951 if (regno
== FRAME_POINTER_REGNUM
&& frame_pointer_needed
)
954 /* Interrupt handlers must also save call_used_regs
955 if they are live or when calling nested functions. */
956 if (interrupt_handler
)
958 if (df_regs_ever_live_p (regno
))
961 if (!crtl
->is_leaf
&& call_used_or_fixed_reg_p (regno
))
965 /* Never need to save registers that aren't touched. */
966 if (!df_regs_ever_live_p (regno
))
969 /* Otherwise save everything that isn't call-clobbered. */
970 return !call_used_or_fixed_reg_p (regno
);
973 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
974 the lowest memory address. COUNT is the number of registers to be
975 moved, with register REGNO + I being moved if bit I of MASK is set.
976 STORE_P specifies the direction of the move and ADJUST_STACK_P says
977 whether or not this is pre-decrement (if STORE_P) or post-increment
978 (if !STORE_P) operation. */
981 m68k_emit_movem (rtx base
, HOST_WIDE_INT offset
,
982 unsigned int count
, unsigned int regno
,
983 unsigned int mask
, bool store_p
, bool adjust_stack_p
)
986 rtx body
, addr
, src
, operands
[2];
989 body
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (adjust_stack_p
+ count
));
990 mode
= reg_raw_mode
[regno
];
995 src
= plus_constant (Pmode
, base
,
997 * GET_MODE_SIZE (mode
)
998 * (HOST_WIDE_INT
) (store_p
? -1 : 1)));
999 XVECEXP (body
, 0, i
++) = gen_rtx_SET (base
, src
);
1002 for (; mask
!= 0; mask
>>= 1, regno
++)
1005 addr
= plus_constant (Pmode
, base
, offset
);
1006 operands
[!store_p
] = gen_frame_mem (mode
, addr
);
1007 operands
[store_p
] = gen_rtx_REG (mode
, regno
);
1008 XVECEXP (body
, 0, i
++)
1009 = gen_rtx_SET (operands
[0], operands
[1]);
1010 offset
+= GET_MODE_SIZE (mode
);
1012 gcc_assert (i
== XVECLEN (body
, 0));
1014 return emit_insn (body
);
1017 /* Make INSN a frame-related instruction. */
1020 m68k_set_frame_related (rtx_insn
*insn
)
1025 RTX_FRAME_RELATED_P (insn
) = 1;
1026 body
= PATTERN (insn
);
1027 if (GET_CODE (body
) == PARALLEL
)
1028 for (i
= 0; i
< XVECLEN (body
, 0); i
++)
1029 RTX_FRAME_RELATED_P (XVECEXP (body
, 0, i
)) = 1;
1032 /* Emit RTL for the "prologue" define_expand. */
1035 m68k_expand_prologue (void)
1037 HOST_WIDE_INT fsize_with_regs
;
1038 rtx limit
, src
, dest
;
1040 m68k_compute_frame_layout ();
1042 if (flag_stack_usage_info
)
1043 current_function_static_stack_size
1044 = current_frame
.size
+ current_frame
.offset
;
1046 /* If the stack limit is a symbol, we can check it here,
1047 before actually allocating the space. */
1048 if (crtl
->limit_stack
1049 && GET_CODE (stack_limit_rtx
) == SYMBOL_REF
)
1051 limit
= plus_constant (Pmode
, stack_limit_rtx
, current_frame
.size
+ 4);
1052 if (!m68k_legitimate_constant_p (Pmode
, limit
))
1054 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), limit
);
1055 limit
= gen_rtx_REG (Pmode
, D0_REG
);
1057 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
,
1058 stack_pointer_rtx
, limit
),
1059 stack_pointer_rtx
, limit
,
1063 fsize_with_regs
= current_frame
.size
;
1064 if (TARGET_COLDFIRE
)
1066 /* ColdFire's move multiple instructions do not allow pre-decrement
1067 addressing. Add the size of movem saves to the initial stack
1068 allocation instead. */
1069 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
1070 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1071 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1072 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1075 if (frame_pointer_needed
)
1077 if (fsize_with_regs
== 0 && TUNE_68040
)
1079 /* On the 68040, two separate moves are faster than link.w 0. */
1080 dest
= gen_frame_mem (Pmode
,
1081 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1082 m68k_set_frame_related (emit_move_insn (dest
, frame_pointer_rtx
));
1083 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx
,
1084 stack_pointer_rtx
));
1086 else if (fsize_with_regs
< 0x8000 || TARGET_68020
)
1087 m68k_set_frame_related
1088 (emit_insn (gen_link (frame_pointer_rtx
,
1089 GEN_INT (-4 - fsize_with_regs
))));
1092 m68k_set_frame_related
1093 (emit_insn (gen_link (frame_pointer_rtx
, GEN_INT (-4))));
1094 m68k_set_frame_related
1095 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1097 GEN_INT (-fsize_with_regs
))));
1100 /* If the frame pointer is needed, emit a special barrier that
1101 will prevent the scheduler from moving stores to the frame
1102 before the stack adjustment. */
1103 emit_insn (gen_stack_tie (stack_pointer_rtx
, frame_pointer_rtx
));
1105 else if (fsize_with_regs
!= 0)
1106 m68k_set_frame_related
1107 (emit_insn (gen_addsi3 (stack_pointer_rtx
,
1109 GEN_INT (-fsize_with_regs
))));
1111 if (current_frame
.fpu_mask
)
1113 gcc_assert (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
);
1115 m68k_set_frame_related
1116 (m68k_emit_movem (stack_pointer_rtx
,
1117 current_frame
.fpu_no
* -GET_MODE_SIZE (XFmode
),
1118 current_frame
.fpu_no
, FP0_REG
,
1119 current_frame
.fpu_mask
, true, true));
1124 /* If we're using moveml to save the integer registers,
1125 the stack pointer will point to the bottom of the moveml
1126 save area. Find the stack offset of the first FP register. */
1127 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1130 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1131 m68k_set_frame_related
1132 (m68k_emit_movem (stack_pointer_rtx
, offset
,
1133 current_frame
.fpu_no
, FP0_REG
,
1134 current_frame
.fpu_mask
, true, false));
1138 /* If the stack limit is not a symbol, check it here.
1139 This has the disadvantage that it may be too late... */
1140 if (crtl
->limit_stack
)
1142 if (REG_P (stack_limit_rtx
))
1143 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode
, stack_pointer_rtx
,
1145 stack_pointer_rtx
, stack_limit_rtx
,
1148 else if (GET_CODE (stack_limit_rtx
) != SYMBOL_REF
)
1149 warning (0, "stack limit expression is not supported");
1152 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1154 /* Store each register separately in the same order moveml does. */
1157 for (i
= 16; i
-- > 0; )
1158 if (current_frame
.reg_mask
& (1 << i
))
1160 src
= gen_rtx_REG (SImode
, D0_REG
+ i
);
1161 dest
= gen_frame_mem (SImode
,
1162 gen_rtx_PRE_DEC (Pmode
, stack_pointer_rtx
));
1163 m68k_set_frame_related (emit_insn (gen_movsi (dest
, src
)));
1168 if (TARGET_COLDFIRE
)
1169 /* The required register save space has already been allocated.
1170 The first register should be stored at (%sp). */
1171 m68k_set_frame_related
1172 (m68k_emit_movem (stack_pointer_rtx
, 0,
1173 current_frame
.reg_no
, D0_REG
,
1174 current_frame
.reg_mask
, true, false));
1176 m68k_set_frame_related
1177 (m68k_emit_movem (stack_pointer_rtx
,
1178 current_frame
.reg_no
* -GET_MODE_SIZE (SImode
),
1179 current_frame
.reg_no
, D0_REG
,
1180 current_frame
.reg_mask
, true, true));
1183 if (!TARGET_SEP_DATA
1184 && crtl
->uses_pic_offset_table
)
1185 emit_insn (gen_load_got (pic_offset_table_rtx
));
1188 /* Return true if a simple (return) instruction is sufficient for this
1189 instruction (i.e. if no epilogue is needed). */
1192 m68k_use_return_insn (void)
1194 if (!reload_completed
|| frame_pointer_needed
|| get_frame_size () != 0)
1197 m68k_compute_frame_layout ();
1198 return current_frame
.offset
== 0;
1201 /* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1202 SIBCALL_P says which.
1204 The function epilogue should not depend on the current stack pointer!
1205 It should use the frame pointer only, if there is a frame pointer.
1206 This is mandatory because of alloca; we also take advantage of it to
1207 omit stack adjustments before returning. */
1210 m68k_expand_epilogue (bool sibcall_p
)
1212 HOST_WIDE_INT fsize
, fsize_with_regs
;
1213 bool big
, restore_from_sp
;
1215 m68k_compute_frame_layout ();
1217 fsize
= current_frame
.size
;
1219 restore_from_sp
= false;
1221 /* FIXME : crtl->is_leaf below is too strong.
1222 What we really need to know there is if there could be pending
1223 stack adjustment needed at that point. */
1224 restore_from_sp
= (!frame_pointer_needed
1225 || (!cfun
->calls_alloca
&& crtl
->is_leaf
));
1227 /* fsize_with_regs is the size we need to adjust the sp when
1228 popping the frame. */
1229 fsize_with_regs
= fsize
;
1230 if (TARGET_COLDFIRE
&& restore_from_sp
)
1232 /* ColdFire's move multiple instructions do not allow post-increment
1233 addressing. Add the size of movem loads to the final deallocation
1235 if (current_frame
.reg_no
>= MIN_MOVEM_REGS
)
1236 fsize_with_regs
+= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1237 if (current_frame
.fpu_no
>= MIN_FMOVEM_REGS
)
1238 fsize_with_regs
+= current_frame
.fpu_no
* GET_MODE_SIZE (DFmode
);
1241 if (current_frame
.offset
+ fsize
>= 0x8000
1243 && (current_frame
.reg_mask
|| current_frame
.fpu_mask
))
1246 && (current_frame
.reg_no
>= MIN_MOVEM_REGS
1247 || current_frame
.fpu_no
>= MIN_FMOVEM_REGS
))
1249 /* ColdFire's move multiple instructions do not support the
1250 (d8,Ax,Xi) addressing mode, so we're as well using a normal
1251 stack-based restore. */
1252 emit_move_insn (gen_rtx_REG (Pmode
, A1_REG
),
1253 GEN_INT (-(current_frame
.offset
+ fsize
)));
1254 emit_insn (gen_blockage ());
1255 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1256 gen_rtx_REG (Pmode
, A1_REG
),
1257 frame_pointer_rtx
));
1258 restore_from_sp
= true;
1262 emit_move_insn (gen_rtx_REG (Pmode
, A1_REG
), GEN_INT (-fsize
));
1268 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1270 /* Restore each register separately in the same order moveml does. */
1272 HOST_WIDE_INT offset
;
1274 offset
= current_frame
.offset
+ fsize
;
1275 for (i
= 0; i
< 16; i
++)
1276 if (current_frame
.reg_mask
& (1 << i
))
1282 /* Generate the address -OFFSET(%fp,%a1.l). */
1283 addr
= gen_rtx_REG (Pmode
, A1_REG
);
1284 addr
= gen_rtx_PLUS (Pmode
, addr
, frame_pointer_rtx
);
1285 addr
= plus_constant (Pmode
, addr
, -offset
);
1287 else if (restore_from_sp
)
1288 addr
= gen_rtx_POST_INC (Pmode
, stack_pointer_rtx
);
1290 addr
= plus_constant (Pmode
, frame_pointer_rtx
, -offset
);
1291 emit_move_insn (gen_rtx_REG (SImode
, D0_REG
+ i
),
1292 gen_frame_mem (SImode
, addr
));
1293 offset
-= GET_MODE_SIZE (SImode
);
1296 else if (current_frame
.reg_mask
)
1299 m68k_emit_movem (gen_rtx_PLUS (Pmode
,
1300 gen_rtx_REG (Pmode
, A1_REG
),
1302 -(current_frame
.offset
+ fsize
),
1303 current_frame
.reg_no
, D0_REG
,
1304 current_frame
.reg_mask
, false, false);
1305 else if (restore_from_sp
)
1306 m68k_emit_movem (stack_pointer_rtx
, 0,
1307 current_frame
.reg_no
, D0_REG
,
1308 current_frame
.reg_mask
, false,
1311 m68k_emit_movem (frame_pointer_rtx
,
1312 -(current_frame
.offset
+ fsize
),
1313 current_frame
.reg_no
, D0_REG
,
1314 current_frame
.reg_mask
, false, false);
1317 if (current_frame
.fpu_no
> 0)
1320 m68k_emit_movem (gen_rtx_PLUS (Pmode
,
1321 gen_rtx_REG (Pmode
, A1_REG
),
1323 -(current_frame
.foffset
+ fsize
),
1324 current_frame
.fpu_no
, FP0_REG
,
1325 current_frame
.fpu_mask
, false, false);
1326 else if (restore_from_sp
)
1328 if (TARGET_COLDFIRE
)
1332 /* If we used moveml to restore the integer registers, the
1333 stack pointer will still point to the bottom of the moveml
1334 save area. Find the stack offset of the first FP
1336 if (current_frame
.reg_no
< MIN_MOVEM_REGS
)
1339 offset
= current_frame
.reg_no
* GET_MODE_SIZE (SImode
);
1340 m68k_emit_movem (stack_pointer_rtx
, offset
,
1341 current_frame
.fpu_no
, FP0_REG
,
1342 current_frame
.fpu_mask
, false, false);
1345 m68k_emit_movem (stack_pointer_rtx
, 0,
1346 current_frame
.fpu_no
, FP0_REG
,
1347 current_frame
.fpu_mask
, false, true);
1350 m68k_emit_movem (frame_pointer_rtx
,
1351 -(current_frame
.foffset
+ fsize
),
1352 current_frame
.fpu_no
, FP0_REG
,
1353 current_frame
.fpu_mask
, false, false);
1356 emit_insn (gen_blockage ());
1357 if (frame_pointer_needed
)
1358 emit_insn (gen_unlink (frame_pointer_rtx
));
1359 else if (fsize_with_regs
)
1360 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1362 GEN_INT (fsize_with_regs
)));
1364 if (crtl
->calls_eh_return
)
1365 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1367 EH_RETURN_STACKADJ_RTX
));
1370 emit_jump_insn (ret_rtx
);
1373 /* Return true if PARALLEL contains register REGNO. */
1375 m68k_reg_present_p (const_rtx parallel
, unsigned int regno
)
1379 if (REG_P (parallel
) && REGNO (parallel
) == regno
)
1382 if (GET_CODE (parallel
) != PARALLEL
)
1385 for (i
= 0; i
< XVECLEN (parallel
, 0); ++i
)
1389 x
= XEXP (XVECEXP (parallel
, 0, i
), 0);
1390 if (REG_P (x
) && REGNO (x
) == regno
)
1397 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
1400 m68k_ok_for_sibcall_p (tree decl
, tree exp
)
1402 enum m68k_function_kind kind
;
1404 /* We cannot use sibcalls for nested functions because we use the
1405 static chain register for indirect calls. */
1406 if (CALL_EXPR_STATIC_CHAIN (exp
))
1409 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun
->decl
))))
1411 /* Check that the return value locations are the same. For
1412 example that we aren't returning a value from the sibling in
1413 a D0 register but then need to transfer it to a A0 register. */
1417 cfun_value
= FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun
->decl
)),
1419 call_value
= FUNCTION_VALUE (TREE_TYPE (exp
), decl
);
1421 /* Check that the values are equal or that the result the callee
1422 function returns is superset of what the current function returns. */
1423 if (!(rtx_equal_p (cfun_value
, call_value
)
1424 || (REG_P (cfun_value
)
1425 && m68k_reg_present_p (call_value
, REGNO (cfun_value
)))))
1429 kind
= m68k_get_function_kind (current_function_decl
);
1430 if (kind
== m68k_fk_normal_function
)
1431 /* We can always sibcall from a normal function, because it's
1432 undefined if it is calling an interrupt function. */
1435 /* Otherwise we can only sibcall if the function kind is known to be
1437 if (decl
&& m68k_get_function_kind (decl
) == kind
)
1443 /* On the m68k all args are always pushed. */
1446 m68k_function_arg (cumulative_args_t
, const function_arg_info
&)
1452 m68k_function_arg_advance (cumulative_args_t cum_v
,
1453 const function_arg_info
&arg
)
1455 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1457 *cum
+= (arg
.promoted_size_in_bytes () + 3) & ~3;
1460 /* Convert X to a legitimate function call memory reference and return the
1464 m68k_legitimize_call_address (rtx x
)
1466 gcc_assert (MEM_P (x
));
1467 if (call_operand (XEXP (x
, 0), VOIDmode
))
1469 return replace_equiv_address (x
, force_reg (Pmode
, XEXP (x
, 0)));
1472 /* Likewise for sibling calls. */
1475 m68k_legitimize_sibcall_address (rtx x
)
1477 gcc_assert (MEM_P (x
));
1478 if (sibcall_operand (XEXP (x
, 0), VOIDmode
))
1481 emit_move_insn (gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
), XEXP (x
, 0));
1482 return replace_equiv_address (x
, gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
));
1485 /* Convert X to a legitimate address and return it if successful. Otherwise
1488 For the 68000, we handle X+REG by loading X into a register R and
1489 using R+REG. R will go in an address reg and indexing will be used.
1490 However, if REG is a broken-out memory address or multiplication,
1491 nothing needs to be done because REG can certainly go in an address reg. */
1494 m68k_legitimize_address (rtx x
, rtx oldx
, machine_mode mode
)
1496 if (m68k_tls_symbol_p (x
))
1497 return m68k_legitimize_tls_address (x
);
1499 if (GET_CODE (x
) == PLUS
)
1501 int ch
= (x
) != (oldx
);
1504 #define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
1506 if (GET_CODE (XEXP (x
, 0)) == MULT
1507 || GET_CODE (XEXP (x
, 0)) == ASHIFT
)
1510 XEXP (x
, 0) = force_operand (XEXP (x
, 0), 0);
1512 if (GET_CODE (XEXP (x
, 1)) == MULT
1513 || GET_CODE (XEXP (x
, 1)) == ASHIFT
)
1516 XEXP (x
, 1) = force_operand (XEXP (x
, 1), 0);
1520 if (GET_CODE (XEXP (x
, 1)) == REG
1521 && GET_CODE (XEXP (x
, 0)) == REG
)
1523 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
1526 x
= force_operand (x
, 0);
1530 if (memory_address_p (mode
, x
))
1533 if (GET_CODE (XEXP (x
, 0)) == REG
1534 || (GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
1535 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
1536 && GET_MODE (XEXP (XEXP (x
, 0), 0)) == HImode
))
1538 rtx temp
= gen_reg_rtx (Pmode
);
1539 rtx val
= force_operand (XEXP (x
, 1), 0);
1540 emit_move_insn (temp
, val
);
1543 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
1544 && GET_CODE (XEXP (x
, 0)) == REG
)
1545 x
= force_operand (x
, 0);
1547 else if (GET_CODE (XEXP (x
, 1)) == REG
1548 || (GET_CODE (XEXP (x
, 1)) == SIGN_EXTEND
1549 && GET_CODE (XEXP (XEXP (x
, 1), 0)) == REG
1550 && GET_MODE (XEXP (XEXP (x
, 1), 0)) == HImode
))
1552 rtx temp
= gen_reg_rtx (Pmode
);
1553 rtx val
= force_operand (XEXP (x
, 0), 0);
1554 emit_move_insn (temp
, val
);
1557 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
1558 && GET_CODE (XEXP (x
, 1)) == REG
)
1559 x
= force_operand (x
, 0);
/* For eliding comparisons, we remember how the flags were set.
   FLAGS_COMPARE_OP0 and FLAGS_COMPARE_OP1 are remembered for a direct
   comparison, they take priority.  FLAGS_OPERAND1 and FLAGS_OPERAND2
   are used in more cases, they are a fallback for comparisons against
   zero after a move or arithmetic insn.
   FLAGS_VALID is set to FLAGS_VALID_NO if we should not use any of
   the remembered operands.  */

/* Operands of the last direct comparison, or NULL_RTX if none.  */
static rtx flags_compare_op0, flags_compare_op1;
/* Fallback operands whose value the flags currently reflect.  */
static rtx flags_operand1, flags_operand2;
/* How (and whether) the remembered operands set the flags.  */
static attr_flags_valid flags_valid = FLAGS_VALID_NO;
1578 /* Return a code other than UNKNOWN if we can elide a CODE comparison of
1582 m68k_find_flags_value (rtx op0
, rtx op1
, rtx_code code
)
1584 if (flags_compare_op0
!= NULL_RTX
)
1586 if (rtx_equal_p (op0
, flags_compare_op0
)
1587 && rtx_equal_p (op1
, flags_compare_op1
))
1589 if (rtx_equal_p (op0
, flags_compare_op1
)
1590 && rtx_equal_p (op1
, flags_compare_op0
))
1591 return swap_condition (code
);
1595 machine_mode mode
= GET_MODE (op0
);
1596 if (op1
!= CONST0_RTX (mode
))
1598 /* Comparisons against 0 with these two should have been optimized out. */
1599 gcc_assert (code
!= LTU
&& code
!= GEU
);
1600 if (flags_valid
== FLAGS_VALID_NOOV
&& (code
== GT
|| code
== LE
))
1602 if (rtx_equal_p (flags_operand1
, op0
) || rtx_equal_p (flags_operand2
, op0
))
1603 return (FLOAT_MODE_P (mode
) ? code
1604 : code
== GE
? PLUS
: code
== LT
? MINUS
: code
);
1605 /* See if we are testing whether the high part of a DImode value is
1606 positive or negative and we have the full value as a remembered
1608 if (code
!= GE
&& code
!= LT
)
1611 && flags_operand1
!= NULL_RTX
&& GET_MODE (flags_operand1
) == DImode
1612 && REG_P (flags_operand1
) && REG_P (op0
)
1613 && hard_regno_nregs (REGNO (flags_operand1
), DImode
) == 2
1614 && REGNO (flags_operand1
) == REGNO (op0
))
1615 return code
== GE
? PLUS
: MINUS
;
1617 && flags_operand2
!= NULL_RTX
&& GET_MODE (flags_operand2
) == DImode
1618 && REG_P (flags_operand2
) && REG_P (op0
)
1619 && hard_regno_nregs (REGNO (flags_operand2
), DImode
) == 2
1620 && REGNO (flags_operand2
) == REGNO (op0
))
1621 return code
== GE
? PLUS
: MINUS
;
1625 /* Called through CC_STATUS_INIT, which is invoked by final whenever a
1626 label is encountered. */
1631 flags_compare_op0
= flags_compare_op1
= NULL_RTX
;
1632 flags_operand1
= flags_operand2
= NULL_RTX
;
1633 flags_valid
= FLAGS_VALID_NO
;
1636 /* Update flags for a move operation with OPERANDS. Called for move
1637 operations where attr_flags_valid returns "set". */
1640 handle_flags_for_move (rtx
*operands
)
1642 flags_compare_op0
= flags_compare_op1
= NULL_RTX
;
1643 if (!ADDRESS_REG_P (operands
[0]))
1645 flags_valid
= FLAGS_VALID_MOVE
;
1646 flags_operand1
= side_effects_p (operands
[0]) ? NULL_RTX
: operands
[0];
1647 if (side_effects_p (operands
[1])
1648 /* ??? For mem->mem moves, this can discard the source as a
1649 valid compare operand. If you assume aligned moves, this
1650 is unnecessary, but in theory, we could have an unaligned
1651 move overwriting parts of its source. */
1652 || modified_in_p (operands
[1], current_output_insn
))
1653 flags_operand2
= NULL_RTX
;
1655 flags_operand2
= operands
[1];
1658 if (flags_operand1
!= NULL_RTX
1659 && modified_in_p (flags_operand1
, current_output_insn
))
1660 flags_operand1
= NULL_RTX
;
1661 if (flags_operand2
!= NULL_RTX
1662 && modified_in_p (flags_operand2
, current_output_insn
))
1663 flags_operand2
= NULL_RTX
;
1666 /* Process INSN to remember flag operands if possible. */
1669 m68k_asm_final_postscan_insn (FILE *, rtx_insn
*insn
, rtx
[], int)
1671 enum attr_flags_valid v
= get_attr_flags_valid (insn
);
1672 if (v
== FLAGS_VALID_SET
)
1674 /* Comparisons use FLAGS_VALID_SET, so we can be sure we need to clear these
1676 flags_compare_op0
= flags_compare_op1
= NULL_RTX
;
1678 if (v
== FLAGS_VALID_NO
)
1680 flags_operand1
= flags_operand2
= NULL_RTX
;
1683 else if (v
== FLAGS_VALID_UNCHANGED
)
1685 if (flags_operand1
!= NULL_RTX
&& modified_in_p (flags_operand1
, insn
))
1686 flags_operand1
= NULL_RTX
;
1687 if (flags_operand2
!= NULL_RTX
&& modified_in_p (flags_operand2
, insn
))
1688 flags_operand2
= NULL_RTX
;
1693 rtx set
= single_set (insn
);
1694 rtx dest
= SET_DEST (set
);
1695 rtx src
= SET_SRC (set
);
1696 if (side_effects_p (dest
))
1701 case FLAGS_VALID_YES
:
1702 case FLAGS_VALID_NOOV
:
1703 flags_operand1
= dest
;
1704 flags_operand2
= NULL_RTX
;
1706 case FLAGS_VALID_MOVE
:
1707 /* fmoves to memory or data registers do not set the condition
1708 codes. Normal moves _do_ set the condition codes, but not in
1709 a way that is appropriate for comparison with 0, because -0.0
1710 would be treated as a negative nonzero number. Note that it
1711 isn't appropriate to conditionalize this restriction on
1712 HONOR_SIGNED_ZEROS because that macro merely indicates whether
1713 we care about the difference between -0.0 and +0.0. */
1714 if (dest
!= NULL_RTX
1717 || GET_CODE (src
) == FIX
1718 || FLOAT_MODE_P (GET_MODE (dest
))))
1719 flags_operand1
= flags_operand2
= NULL_RTX
;
1722 flags_operand1
= dest
;
1723 if (GET_MODE (src
) != VOIDmode
&& !side_effects_p (src
)
1724 && !modified_in_p (src
, insn
))
1725 flags_operand2
= src
;
1727 flags_operand2
= NULL_RTX
;
1736 /* Output a dbCC; jCC sequence. Note we do not handle the
1737 floating point version of this sequence (Fdbcc).
1738 OPERANDS are as in the two peepholes. CODE is the code
1739 returned by m68k_output_branch_<mode>. */
1742 output_dbcc_and_branch (rtx
*operands
, rtx_code code
)
1747 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands
);
1751 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands
);
1755 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands
);
1759 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands
);
1763 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands
);
1767 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands
);
1771 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands
);
1775 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands
);
1779 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands
);
1783 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands
);
1787 output_asm_insn ("dbpl %0,%l1\n\tjle %l2", operands
);
1791 output_asm_insn ("dbmi %0,%l1\n\tjle %l2", operands
);
1798 /* If the decrement is to be done in SImode, then we have
1799 to compensate for the fact that dbcc decrements in HImode. */
1800 switch (GET_MODE (operands
[0]))
1803 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands
);
1815 output_scc_di (rtx op
, rtx operand1
, rtx operand2
, rtx dest
)
1818 enum rtx_code op_code
= GET_CODE (op
);
1820 /* This does not produce a useful cc. */
1823 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1824 below. Swap the operands and change the op if these requirements
1825 are not fulfilled. */
1826 if (GET_CODE (operand2
) == REG
&& GET_CODE (operand1
) != REG
)
1830 operand1
= operand2
;
1832 op_code
= swap_condition (op_code
);
1834 loperands
[0] = operand1
;
1835 if (GET_CODE (operand1
) == REG
)
1836 loperands
[1] = gen_rtx_REG (SImode
, REGNO (operand1
) + 1);
1838 loperands
[1] = adjust_address (operand1
, SImode
, 4);
1839 if (operand2
!= const0_rtx
)
1841 loperands
[2] = operand2
;
1842 if (GET_CODE (operand2
) == REG
)
1843 loperands
[3] = gen_rtx_REG (SImode
, REGNO (operand2
) + 1);
1845 loperands
[3] = adjust_address (operand2
, SImode
, 4);
1847 loperands
[4] = gen_label_rtx ();
1848 if (operand2
!= const0_rtx
)
1849 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands
);
1852 if (TARGET_68020
|| TARGET_COLDFIRE
|| ! ADDRESS_REG_P (loperands
[0]))
1853 output_asm_insn ("tst%.l %0", loperands
);
1855 output_asm_insn ("cmp%.w #0,%0", loperands
);
1857 output_asm_insn ("jne %l4", loperands
);
1859 if (TARGET_68020
|| TARGET_COLDFIRE
|| ! ADDRESS_REG_P (loperands
[1]))
1860 output_asm_insn ("tst%.l %1", loperands
);
1862 output_asm_insn ("cmp%.w #0,%1", loperands
);
1865 loperands
[5] = dest
;
1870 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1871 CODE_LABEL_NUMBER (loperands
[4]));
1872 output_asm_insn ("seq %5", loperands
);
1876 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1877 CODE_LABEL_NUMBER (loperands
[4]));
1878 output_asm_insn ("sne %5", loperands
);
1882 loperands
[6] = gen_label_rtx ();
1883 output_asm_insn ("shi %5\n\tjra %l6", loperands
);
1884 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1885 CODE_LABEL_NUMBER (loperands
[4]));
1886 output_asm_insn ("sgt %5", loperands
);
1887 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1888 CODE_LABEL_NUMBER (loperands
[6]));
1892 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1893 CODE_LABEL_NUMBER (loperands
[4]));
1894 output_asm_insn ("shi %5", loperands
);
1898 loperands
[6] = gen_label_rtx ();
1899 output_asm_insn ("scs %5\n\tjra %l6", loperands
);
1900 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1901 CODE_LABEL_NUMBER (loperands
[4]));
1902 output_asm_insn ("slt %5", loperands
);
1903 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1904 CODE_LABEL_NUMBER (loperands
[6]));
1908 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1909 CODE_LABEL_NUMBER (loperands
[4]));
1910 output_asm_insn ("scs %5", loperands
);
1914 loperands
[6] = gen_label_rtx ();
1915 output_asm_insn ("scc %5\n\tjra %l6", loperands
);
1916 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1917 CODE_LABEL_NUMBER (loperands
[4]));
1918 output_asm_insn ("sge %5", loperands
);
1919 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1920 CODE_LABEL_NUMBER (loperands
[6]));
1924 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1925 CODE_LABEL_NUMBER (loperands
[4]));
1926 output_asm_insn ("scc %5", loperands
);
1930 loperands
[6] = gen_label_rtx ();
1931 output_asm_insn ("sls %5\n\tjra %l6", loperands
);
1932 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1933 CODE_LABEL_NUMBER (loperands
[4]));
1934 output_asm_insn ("sle %5", loperands
);
1935 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1936 CODE_LABEL_NUMBER (loperands
[6]));
1940 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "L",
1941 CODE_LABEL_NUMBER (loperands
[4]));
1942 output_asm_insn ("sls %5", loperands
);
1952 m68k_output_btst (rtx countop
, rtx dataop
, rtx_code code
, int signpos
)
1958 if (GET_CODE (countop
) == CONST_INT
)
1960 int count
= INTVAL (countop
);
1961 /* If COUNT is bigger than size of storage unit in use,
1962 advance to the containing unit of same size. */
1963 if (count
> signpos
)
1965 int offset
= (count
& ~signpos
) / 8;
1966 count
= count
& signpos
;
1967 ops
[1] = dataop
= adjust_address (dataop
, QImode
, offset
);
1970 if (code
== EQ
|| code
== NE
)
1974 output_asm_insn ("tst%.l %1", ops
);
1975 return code
== EQ
? PLUS
: MINUS
;
1979 output_asm_insn ("tst%.w %1", ops
);
1980 return code
== EQ
? PLUS
: MINUS
;
1984 output_asm_insn ("tst%.b %1", ops
);
1985 return code
== EQ
? PLUS
: MINUS
;
1988 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1989 On some m68k variants unfortunately that's slower than btst.
1990 On 68000 and higher, that should also work for all HImode operands. */
1991 if (TUNE_CPU32
|| TARGET_COLDFIRE
|| optimize_size
)
1993 if (count
== 3 && DATA_REG_P (ops
[1]) && (code
== EQ
|| code
== NE
))
1995 output_asm_insn ("move%.w %1,%%ccr", ops
);
1996 return code
== EQ
? PLUS
: MINUS
;
1998 if (count
== 2 && DATA_REG_P (ops
[1]) && (code
== EQ
|| code
== NE
))
2000 output_asm_insn ("move%.w %1,%%ccr", ops
);
2001 return code
== EQ
? NE
: EQ
;
2003 /* count == 1 followed by bvc/bvs and
2004 count == 0 followed by bcc/bcs are also possible, but need
2005 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
2008 output_asm_insn ("btst %0,%1", ops
);
2012 /* Output a bftst instruction for a zero_extract with ZXOP0, ZXOP1 and ZXOP2
2013 operands. CODE is the code of the comparison, and we return the code to
2014 be actually used in the jump. */
2017 m68k_output_bftst (rtx zxop0
, rtx zxop1
, rtx zxop2
, rtx_code code
)
2019 if (zxop1
== const1_rtx
&& GET_CODE (zxop2
) == CONST_INT
)
2021 int width
= GET_CODE (zxop0
) == REG
? 31 : 7;
2022 /* Pass 1000 as SIGNPOS argument so that btst will
2023 not think we are testing the sign bit for an `and'
2024 and assume that nonzero implies a negative result. */
2025 return m68k_output_btst (GEN_INT (width
- INTVAL (zxop2
)), zxop0
, code
, 1000);
2027 rtx ops
[3] = { zxop0
, zxop1
, zxop2
};
2028 output_asm_insn ("bftst %0{%b2:%b1}", ops
);
2032 /* Return true if X is a legitimate base register. STRICT_P says
2033 whether we need strict checking. */
2036 m68k_legitimate_base_reg_p (rtx x
, bool strict_p
)
2038 /* Allow SUBREG everywhere we allow REG. This results in better code. */
2039 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
2044 ? REGNO_OK_FOR_BASE_P (REGNO (x
))
2045 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x
))));
2048 /* Return true if X is a legitimate index register. STRICT_P says
2049 whether we need strict checking. */
2052 m68k_legitimate_index_reg_p (rtx x
, bool strict_p
)
2054 if (!strict_p
&& GET_CODE (x
) == SUBREG
)
2059 ? REGNO_OK_FOR_INDEX_P (REGNO (x
))
2060 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x
))));
2063 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
2064 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
2065 ADDRESS if so. STRICT_P says whether we need strict checking. */
2068 m68k_decompose_index (rtx x
, bool strict_p
, struct m68k_address
*address
)
2072 /* Check for a scale factor. */
2074 if (TARGET_68020
|| TARGET_COLDFIRE
)
2076 if (GET_CODE (x
) == MULT
2077 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2078 && (INTVAL (XEXP (x
, 1)) == 2
2079 || INTVAL (XEXP (x
, 1)) == 4
2080 || (INTVAL (XEXP (x
, 1)) == 8
2081 && (TARGET_COLDFIRE_FPU
|| !TARGET_COLDFIRE
))))
2083 scale
= INTVAL (XEXP (x
, 1));
2086 /* LRA uses ASHIFT instead of MULT outside of MEM. */
2087 else if (GET_CODE (x
) == ASHIFT
2088 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2089 && (INTVAL (XEXP (x
, 1)) == 1
2090 || INTVAL (XEXP (x
, 1)) == 2
2091 || (INTVAL (XEXP (x
, 1)) == 3
2092 && (TARGET_COLDFIRE_FPU
|| !TARGET_COLDFIRE
))))
2094 scale
= 1 << INTVAL (XEXP (x
, 1));
2099 /* Check for a word extension. */
2100 if (!TARGET_COLDFIRE
2101 && GET_CODE (x
) == SIGN_EXTEND
2102 && GET_MODE (XEXP (x
, 0)) == HImode
)
2105 if (m68k_legitimate_index_reg_p (x
, strict_p
))
2107 address
->scale
= scale
;
2115 /* Return true if X is an illegitimate symbolic constant. */
2118 m68k_illegitimate_symbolic_constant_p (rtx x
)
2122 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
)
2124 split_const (x
, &base
, &offset
);
2125 if (GET_CODE (base
) == SYMBOL_REF
2126 && !offset_within_block_p (base
, INTVAL (offset
)))
2129 return m68k_tls_reference_p (x
, false);
2132 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2135 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
2137 return m68k_illegitimate_symbolic_constant_p (x
);
2140 /* Return true if X is a legitimate constant address that can reach
2141 bytes in the range [X, X + REACH). STRICT_P says whether we need
2145 m68k_legitimate_constant_address_p (rtx x
, unsigned int reach
, bool strict_p
)
2149 if (!CONSTANT_ADDRESS_P (x
))
2153 && !(strict_p
&& TARGET_PCREL
)
2154 && symbolic_operand (x
, VOIDmode
))
2157 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P
&& reach
> 1)
2159 split_const (x
, &base
, &offset
);
2160 if (GET_CODE (base
) == SYMBOL_REF
2161 && !offset_within_block_p (base
, INTVAL (offset
) + reach
- 1))
2165 return !m68k_tls_reference_p (x
, false);
2168 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
2169 labels will become jump tables. */
2172 m68k_jump_table_ref_p (rtx x
)
2174 if (GET_CODE (x
) != LABEL_REF
)
2177 rtx_insn
*insn
= as_a
<rtx_insn
*> (XEXP (x
, 0));
2178 if (!NEXT_INSN (insn
) && !PREV_INSN (insn
))
2181 insn
= next_nonnote_insn (insn
);
2182 return insn
&& JUMP_TABLE_DATA_P (insn
);
2185 /* Return true if X is a legitimate address for values of mode MODE.
2186 STRICT_P says whether strict checking is needed. If the address
2187 is valid, describe its components in *ADDRESS. */
2190 m68k_decompose_address (machine_mode mode
, rtx x
,
2191 bool strict_p
, struct m68k_address
*address
)
2195 memset (address
, 0, sizeof (*address
));
2197 if (mode
== BLKmode
)
2200 reach
= GET_MODE_SIZE (mode
);
2202 /* Check for (An) (mode 2). */
2203 if (m68k_legitimate_base_reg_p (x
, strict_p
))
2209 /* Check for -(An) and (An)+ (modes 3 and 4). */
2210 if ((GET_CODE (x
) == PRE_DEC
|| GET_CODE (x
) == POST_INC
)
2211 && m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
))
2213 address
->code
= GET_CODE (x
);
2214 address
->base
= XEXP (x
, 0);
2218 /* Check for (d16,An) (mode 5). */
2219 if (GET_CODE (x
) == PLUS
2220 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2221 && IN_RANGE (INTVAL (XEXP (x
, 1)), -0x8000, 0x8000 - reach
)
2222 && m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
))
2224 address
->base
= XEXP (x
, 0);
2225 address
->offset
= XEXP (x
, 1);
2229 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2230 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2232 if (GET_CODE (x
) == PLUS
2233 && XEXP (x
, 0) == pic_offset_table_rtx
)
2235 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2236 they are invalid in this context. */
2237 if (m68k_unwrap_symbol (XEXP (x
, 1), false) != XEXP (x
, 1))
2239 address
->base
= XEXP (x
, 0);
2240 address
->offset
= XEXP (x
, 1);
2245 /* The ColdFire FPU only accepts addressing modes 2-5. */
2246 if (TARGET_COLDFIRE_FPU
&& GET_MODE_CLASS (mode
) == MODE_FLOAT
)
2249 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2250 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2251 All these modes are variations of mode 7. */
2252 if (m68k_legitimate_constant_address_p (x
, reach
, strict_p
))
2254 address
->offset
= x
;
2258 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2261 ??? do_tablejump creates these addresses before placing the target
2262 label, so we have to assume that unplaced labels are jump table
2263 references. It seems unlikely that we would ever generate indexed
2264 accesses to unplaced labels in other cases. Do not accept it in
2265 PIC mode, since the label address will need to be loaded from memory. */
2266 if (GET_CODE (x
) == PLUS
2268 && m68k_jump_table_ref_p (XEXP (x
, 1))
2269 && m68k_decompose_index (XEXP (x
, 0), strict_p
, address
))
2271 address
->offset
= XEXP (x
, 1);
2275 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2276 (bd,An,Xn.SIZE*SCALE) addresses. */
2280 /* Check for a nonzero base displacement. */
2281 if (GET_CODE (x
) == PLUS
2282 && m68k_legitimate_constant_address_p (XEXP (x
, 1), reach
, strict_p
))
2284 address
->offset
= XEXP (x
, 1);
2288 /* Check for a suppressed index register. */
2289 if (m68k_legitimate_base_reg_p (x
, strict_p
))
2295 /* Check for a suppressed base register. Do not allow this case
2296 for non-symbolic offsets as it effectively gives gcc freedom
2297 to treat data registers as base registers, which can generate
2300 && symbolic_operand (address
->offset
, VOIDmode
)
2301 && m68k_decompose_index (x
, strict_p
, address
))
2306 /* Check for a nonzero base displacement. */
2307 if (GET_CODE (x
) == PLUS
2308 && GET_CODE (XEXP (x
, 1)) == CONST_INT
2309 && IN_RANGE (INTVAL (XEXP (x
, 1)), -0x80, 0x80 - reach
))
2311 address
->offset
= XEXP (x
, 1);
2316 /* We now expect the sum of a base and an index. */
2317 if (GET_CODE (x
) == PLUS
)
2319 if (m68k_legitimate_base_reg_p (XEXP (x
, 0), strict_p
)
2320 && m68k_decompose_index (XEXP (x
, 1), strict_p
, address
))
2322 address
->base
= XEXP (x
, 0);
2326 if (m68k_legitimate_base_reg_p (XEXP (x
, 1), strict_p
)
2327 && m68k_decompose_index (XEXP (x
, 0), strict_p
, address
))
2329 address
->base
= XEXP (x
, 1);
2336 /* Return true if X is a legitimate address for values of mode MODE.
2337 STRICT_P says whether strict checking is needed. */
2340 m68k_legitimate_address_p (machine_mode mode
, rtx x
, bool strict_p
, code_helper
)
2342 struct m68k_address address
;
2344 return m68k_decompose_address (mode
, x
, strict_p
, &address
);
2347 /* Return true if X is a memory, describing its address in ADDRESS if so.
2348 Apply strict checking if called during or after reload. */
2351 m68k_legitimate_mem_p (rtx x
, struct m68k_address
*address
)
2354 && m68k_decompose_address (GET_MODE (x
), XEXP (x
, 0),
2355 (reload_in_progress
|| lra_in_progress
2356 || reload_completed
),
2360 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2363 m68k_legitimate_constant_p (machine_mode mode
, rtx x
)
2365 return mode
!= XFmode
&& !m68k_illegitimate_symbolic_constant_p (x
);
2368 /* Return true if X matches the 'Q' constraint. It must be a memory
2369 with a base address and no constant offset or index. */
2372 m68k_matches_q_p (rtx x
)
2374 struct m68k_address address
;
2376 return (m68k_legitimate_mem_p (x
, &address
)
2377 && address
.code
== UNKNOWN
2383 /* Return true if X matches the 'U' constraint. It must be a base address
2384 with a constant offset and no index. */
2387 m68k_matches_u_p (rtx x
)
2389 struct m68k_address address
;
2391 return (m68k_legitimate_mem_p (x
, &address
)
2392 && address
.code
== UNKNOWN
2398 /* Return GOT pointer. */
2403 if (pic_offset_table_rtx
== NULL_RTX
)
2404 pic_offset_table_rtx
= gen_rtx_REG (Pmode
, PIC_REG
);
2406 crtl
->uses_pic_offset_table
= 1;
2408 return pic_offset_table_rtx
;
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers (see m68k_wrap_symbol below).  RELOC_GOT is the only non-TLS
   member; the TLS members correspond to the GD/LD/IE/LE access models.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True iff RELOC names a TLS relocation rather than a GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2418 /* Wrap symbol X into unspec representing relocation RELOC.
2419 BASE_REG - register that should be added to the result.
2420 TEMP_REG - if non-null, temporary register. */
2423 m68k_wrap_symbol (rtx x
, enum m68k_reloc reloc
, rtx base_reg
, rtx temp_reg
)
2427 use_x_p
= (base_reg
== pic_offset_table_rtx
) ? TARGET_XGOT
: TARGET_XTLS
;
2429 if (TARGET_COLDFIRE
&& use_x_p
)
2430 /* When compiling with -mx{got, tls} switch the code will look like this:
2432 move.l <X>@<RELOC>,<TEMP_REG>
2433 add.l <BASE_REG>,<TEMP_REG> */
2435 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2436 to put @RELOC after reference. */
2437 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2439 x
= gen_rtx_CONST (Pmode
, x
);
2441 if (temp_reg
== NULL
)
2443 gcc_assert (can_create_pseudo_p ());
2444 temp_reg
= gen_reg_rtx (Pmode
);
2447 emit_move_insn (temp_reg
, x
);
2448 emit_insn (gen_addsi3 (temp_reg
, temp_reg
, base_reg
));
2453 x
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, x
, GEN_INT (reloc
)),
2455 x
= gen_rtx_CONST (Pmode
, x
);
2457 x
= gen_rtx_PLUS (Pmode
, base_reg
, x
);
2463 /* Helper for m68k_unwrap_symbol.
2464 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2465 sets *RELOC_PTR to relocation type for the symbol. */
2468 m68k_unwrap_symbol_1 (rtx orig
, bool unwrap_reloc32_p
,
2469 enum m68k_reloc
*reloc_ptr
)
2471 if (GET_CODE (orig
) == CONST
)
2474 enum m68k_reloc dummy
;
2478 if (reloc_ptr
== NULL
)
2481 /* Handle an addend. */
2482 if ((GET_CODE (x
) == PLUS
|| GET_CODE (x
) == MINUS
)
2483 && CONST_INT_P (XEXP (x
, 1)))
2486 if (GET_CODE (x
) == UNSPEC
)
2488 switch (XINT (x
, 1))
2490 case UNSPEC_RELOC16
:
2491 orig
= XVECEXP (x
, 0, 0);
2492 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2495 case UNSPEC_RELOC32
:
2496 if (unwrap_reloc32_p
)
2498 orig
= XVECEXP (x
, 0, 0);
2499 *reloc_ptr
= (enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1));
2512 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2513 UNSPEC_RELOC32 wrappers. */
2516 m68k_unwrap_symbol (rtx orig
, bool unwrap_reloc32_p
)
2518 return m68k_unwrap_symbol_1 (orig
, unwrap_reloc32_p
, NULL
);
2521 /* Adjust decorated address operand before outputing assembler for it. */
2524 m68k_adjust_decorated_operand (rtx op
)
2526 /* Combine and, possibly, other optimizations may do good job
2528 (const (unspec [(symbol)]))
2530 (const (plus (unspec [(symbol)])
2532 The problem with this is emitting @TLS or @GOT decorations.
2533 The decoration is emitted when processing (unspec), so the
2534 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2536 It seems that the easiest solution to this is to convert such
2538 (const (unspec [(plus (symbol)
2540 Note, that the top level of operand remains intact, so we don't have
2541 to patch up anything outside of the operand. */
2543 subrtx_var_iterator::array_type array
;
2544 FOR_EACH_SUBRTX_VAR (iter
, array
, op
, ALL
)
2547 if (m68k_unwrap_symbol (x
, true) != x
)
2551 gcc_assert (GET_CODE (x
) == CONST
);
2554 if (GET_CODE (plus
) == PLUS
|| GET_CODE (plus
) == MINUS
)
2559 unspec
= XEXP (plus
, 0);
2560 gcc_assert (GET_CODE (unspec
) == UNSPEC
);
2561 addend
= XEXP (plus
, 1);
2562 gcc_assert (CONST_INT_P (addend
));
2564 /* We now have all the pieces, rearrange them. */
2566 /* Move symbol to plus. */
2567 XEXP (plus
, 0) = XVECEXP (unspec
, 0, 0);
2569 /* Move plus inside unspec. */
2570 XVECEXP (unspec
, 0, 0) = plus
;
2572 /* Move unspec to top level of const. */
2573 XEXP (x
, 0) = unspec
;
2575 iter
.skip_subrtxes ();
2580 /* Prescan insn before outputing assembler for it. */
2583 m68k_final_prescan_insn (rtx_insn
*insn ATTRIBUTE_UNUSED
,
2584 rtx
*operands
, int n_operands
)
2588 for (i
= 0; i
< n_operands
; ++i
)
2589 m68k_adjust_decorated_operand (operands
[i
]);
2592 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2593 If REG is non-null, use it; generate new pseudo otherwise. */
2596 m68k_move_to_reg (rtx x
, rtx orig
, rtx reg
)
2600 if (reg
== NULL_RTX
)
2602 gcc_assert (can_create_pseudo_p ());
2603 reg
= gen_reg_rtx (Pmode
);
2606 insn
= emit_move_insn (reg
, x
);
2607 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2609 set_unique_reg_note (insn
, REG_EQUAL
, orig
);
2614 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2618 m68k_wrap_symbol_into_got_ref (rtx x
, enum m68k_reloc reloc
, rtx temp_reg
)
2620 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), temp_reg
);
2622 x
= gen_rtx_MEM (Pmode
, x
);
2623 MEM_READONLY_P (x
) = 1;
2628 /* Legitimize PIC addresses. If the address is already
2629 position-independent, we return ORIG. Newly generated
2630 position-independent addresses go to REG. If we need more
2631 than one register, we lose.
2633 An address is legitimized by making an indirect reference
2634 through the Global Offset Table with the name of the symbol
2637 The assembler and linker are responsible for placing the
2638 address of the symbol in the GOT. The function prologue
2639 is responsible for initializing a5 to the starting address
2642 The assembler is also responsible for translating a symbol name
2643 into a constant displacement from the start of the GOT.
2645 A quick example may make things a little clearer:
2647 When not generating PIC code to store the value 12345 into _foo
2648 we would generate the following code:
2652 When generating PIC two transformations are made. First, the compiler
2653 loads the address of foo into a register. So the first transformation makes:
2658 The code in movsi will intercept the lea instruction and call this
2659 routine which will transform the instructions into:
2661 movel a5@(_foo:w), a0
2665 That (in a nutshell) is how *all* symbol and label references are
2669 legitimize_pic_address (rtx orig
, machine_mode mode ATTRIBUTE_UNUSED
,
2674 /* First handle a simple SYMBOL_REF or LABEL_REF */
2675 if (GET_CODE (orig
) == SYMBOL_REF
|| GET_CODE (orig
) == LABEL_REF
)
2679 pic_ref
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_GOT
, reg
);
2680 pic_ref
= m68k_move_to_reg (pic_ref
, orig
, reg
);
2682 else if (GET_CODE (orig
) == CONST
)
2686 /* Make sure this has not already been legitimized. */
2687 if (m68k_unwrap_symbol (orig
, true) != orig
)
2692 /* legitimize both operands of the PLUS */
2693 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
2695 base
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 0), Pmode
, reg
);
2696 orig
= legitimize_pic_address (XEXP (XEXP (orig
, 0), 1), Pmode
,
2697 base
== reg
? 0 : reg
);
2699 if (GET_CODE (orig
) == CONST_INT
)
2700 pic_ref
= plus_constant (Pmode
, base
, INTVAL (orig
));
2702 pic_ref
= gen_rtx_PLUS (Pmode
, base
, orig
);
2708 /* The __tls_get_addr symbol. */
2709 static GTY(()) rtx m68k_tls_get_addr
;
2711 /* Return SYMBOL_REF for __tls_get_addr. */
2714 m68k_get_tls_get_addr (void)
2716 if (m68k_tls_get_addr
== NULL_RTX
)
2717 m68k_tls_get_addr
= init_one_libfunc ("__tls_get_addr");
2719 return m68k_tls_get_addr
;
/* Return libcall result in A0 instead of usual D0.  Temporarily set to
   true around the __tls_get_addr / __m68k_read_tp call sequences below,
   since those functions return their result in A0.  */
static bool m68k_libcall_value_in_a0_p = false;
2725 /* Emit instruction sequence that calls __tls_get_addr. X is
2726 the TLS symbol we are referencing and RELOC is the symbol type to use
2727 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2728 emitted. A pseudo register with result of __tls_get_addr call is
2732 m68k_call_tls_get_addr (rtx x
, rtx eqv
, enum m68k_reloc reloc
)
2738 /* Emit the call sequence. */
2741 /* FIXME: Unfortunately, emit_library_call_value does not
2742 consider (plus (%a5) (const (unspec))) to be a good enough
2743 operand for push, so it forces it into a register. The bad
2744 thing about this is that combiner, due to copy propagation and other
2745 optimizations, sometimes cannot later fix this. As a consequence,
2746 additional register may be allocated resulting in a spill.
2747 For reference, see args processing loops in
2748 calls.cc:emit_library_call_value_1.
2749 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2750 x
= m68k_wrap_symbol (x
, reloc
, m68k_get_gp (), NULL_RTX
);
2752 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2753 is the simpliest way of generating a call. The difference between
2754 __tls_get_addr() and libcall is that the result is returned in D0
2755 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2756 which temporarily switches returning the result to A0. */
2758 m68k_libcall_value_in_a0_p
= true;
2759 a0
= emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX
, LCT_PURE
,
2761 m68k_libcall_value_in_a0_p
= false;
2763 insns
= get_insns ();
2766 gcc_assert (can_create_pseudo_p ());
2767 dest
= gen_reg_rtx (Pmode
);
2768 emit_libcall_block (insns
, dest
, a0
, eqv
);
2773 /* The __tls_get_addr symbol. */
2774 static GTY(()) rtx m68k_read_tp
;
2776 /* Return SYMBOL_REF for __m68k_read_tp. */
2779 m68k_get_m68k_read_tp (void)
2781 if (m68k_read_tp
== NULL_RTX
)
2782 m68k_read_tp
= init_one_libfunc ("__m68k_read_tp");
2784 return m68k_read_tp
;
2787 /* Emit instruction sequence that calls __m68k_read_tp.
2788 A pseudo register with result of __m68k_read_tp call is returned. */
2791 m68k_call_m68k_read_tp (void)
2800 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2801 is the simpliest way of generating a call. The difference between
2802 __m68k_read_tp() and libcall is that the result is returned in D0
2803 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2804 which temporarily switches returning the result to A0. */
2806 /* Emit the call sequence. */
2807 m68k_libcall_value_in_a0_p
= true;
2808 a0
= emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX
, LCT_PURE
,
2810 m68k_libcall_value_in_a0_p
= false;
2811 insns
= get_insns ();
2814 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2815 share the m68k_read_tp result with other IE/LE model accesses. */
2816 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const1_rtx
), UNSPEC_RELOC32
);
2818 gcc_assert (can_create_pseudo_p ());
2819 dest
= gen_reg_rtx (Pmode
);
2820 emit_libcall_block (insns
, dest
, a0
, eqv
);
2825 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2826 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2830 m68k_legitimize_tls_address (rtx orig
)
2832 switch (SYMBOL_REF_TLS_MODEL (orig
))
2834 case TLS_MODEL_GLOBAL_DYNAMIC
:
2835 orig
= m68k_call_tls_get_addr (orig
, orig
, RELOC_TLSGD
);
2838 case TLS_MODEL_LOCAL_DYNAMIC
:
2844 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2845 share the LDM result with other LD model accesses. */
2846 eqv
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (1, const0_rtx
),
2849 a0
= m68k_call_tls_get_addr (orig
, eqv
, RELOC_TLSLDM
);
2851 x
= m68k_wrap_symbol (orig
, RELOC_TLSLDO
, a0
, NULL_RTX
);
2853 if (can_create_pseudo_p ())
2854 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2860 case TLS_MODEL_INITIAL_EXEC
:
2865 a0
= m68k_call_m68k_read_tp ();
2867 x
= m68k_wrap_symbol_into_got_ref (orig
, RELOC_TLSIE
, NULL_RTX
);
2868 x
= gen_rtx_PLUS (Pmode
, x
, a0
);
2870 if (can_create_pseudo_p ())
2871 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2877 case TLS_MODEL_LOCAL_EXEC
:
2882 a0
= m68k_call_m68k_read_tp ();
2884 x
= m68k_wrap_symbol (orig
, RELOC_TLSLE
, a0
, NULL_RTX
);
2886 if (can_create_pseudo_p ())
2887 x
= m68k_move_to_reg (x
, orig
, NULL_RTX
);
2900 /* Return true if X is a TLS symbol. */
2903 m68k_tls_symbol_p (rtx x
)
2905 if (!TARGET_HAVE_TLS
)
2908 if (GET_CODE (x
) != SYMBOL_REF
)
2911 return SYMBOL_REF_TLS_MODEL (x
) != 0;
2914 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2915 though illegitimate one.
2916 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2919 m68k_tls_reference_p (rtx x
, bool legitimate_p
)
2921 if (!TARGET_HAVE_TLS
)
2926 subrtx_var_iterator::array_type array
;
2927 FOR_EACH_SUBRTX_VAR (iter
, array
, x
, ALL
)
2931 /* Note: this is not the same as m68k_tls_symbol_p. */
2932 if (GET_CODE (x
) == SYMBOL_REF
&& SYMBOL_REF_TLS_MODEL (x
) != 0)
2935 /* Don't recurse into legitimate TLS references. */
2936 if (m68k_tls_reference_p (x
, true))
2937 iter
.skip_subrtxes ();
2943 enum m68k_reloc reloc
= RELOC_GOT
;
2945 return (m68k_unwrap_symbol_1 (x
, true, &reloc
) != x
2946 && TLS_RELOC_P (reloc
));
2952 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2954 /* Return the type of move that should be used for integer I. */
2957 m68k_const_method (HOST_WIDE_INT i
)
2964 /* The ColdFire doesn't have byte or word operations. */
2965 /* FIXME: This may not be useful for the m68060 either. */
2966 if (!TARGET_COLDFIRE
)
2968 /* if -256 < N < 256 but N is not in range for a moveq
2969 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2970 if (USE_MOVQ (i
^ 0xff))
2972 /* Likewise, try with not.w */
2973 if (USE_MOVQ (i
^ 0xffff))
2975 /* This is the only value where neg.w is useful */
2980 /* Try also with swap. */
2982 if (USE_MOVQ ((u
>> 16) | (u
<< 16)))
2987 /* Try using MVZ/MVS with an immediate value to load constants. */
2988 if (i
>= 0 && i
<= 65535)
2990 if (i
>= -32768 && i
<= 32767)
2994 /* Otherwise, use move.l */
2998 /* Return the cost of moving constant I into a data register. */
3001 const_int_cost (HOST_WIDE_INT i
)
3003 switch (m68k_const_method (i
))
3006 /* Constants between -128 and 127 are cheap due to moveq. */
3014 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
3024 m68k_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
3025 int opno ATTRIBUTE_UNUSED
,
3026 int *total
, bool speed ATTRIBUTE_UNUSED
)
3028 int code
= GET_CODE (x
);
3033 /* Constant zero is super cheap due to clr instruction. */
3034 if (x
== const0_rtx
)
3037 *total
= const_int_cost (INTVAL (x
));
3047 /* Make 0.0 cheaper than other floating constants to
3048 encourage creating tstsf and tstdf insns. */
3049 if ((GET_RTX_CLASS (outer_code
) == RTX_COMPARE
3050 || GET_RTX_CLASS (outer_code
) == RTX_COMM_COMPARE
)
3051 && (x
== CONST0_RTX (SFmode
) || x
== CONST0_RTX (DFmode
)))
3057 /* These are vaguely right for a 68020. */
3058 /* The costs for long multiply have been adjusted to work properly
3059 in synth_mult on the 68020, relative to an average of the time
3060 for add and the time for shift, taking away a little more because
3061 sometimes move insns are needed. */
3062 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
3067 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
3068 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
3070 : TARGET_COLDFIRE ? 3 : 13)
3075 : TUNE_68000_10 ? 5 \
3076 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
3077 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
3079 : TARGET_COLDFIRE ? 2 : 8)
3082 (TARGET_CF_HWDIV ? 11 \
3083 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
3086 /* An lea costs about three times as much as a simple add. */
3088 && GET_CODE (XEXP (x
, 1)) == REG
3089 && ((GET_CODE (XEXP (x
, 0)) == MULT
3090 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
3091 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
3092 && (INTVAL (XEXP (XEXP (x
, 0), 1)) == 2
3093 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 4
3094 || INTVAL (XEXP (XEXP (x
, 0), 1)) == 8))
3095 || (GET_CODE (XEXP (x
, 0)) == ASHIFT
3096 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
3097 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
3098 && ((unsigned HOST_WIDE_INT
) INTVAL (XEXP (XEXP (x
, 0), 1))
3101 /* lea an@(dx:l:i),am */
3102 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 2 : 3);
3112 *total
= COSTS_N_INSNS(1);
3117 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
)
3119 if (INTVAL (XEXP (x
, 1)) < 16)
3120 *total
= COSTS_N_INSNS (2) + INTVAL (XEXP (x
, 1)) / 2;
3122 /* We're using clrw + swap for these cases. */
3123 *total
= COSTS_N_INSNS (4) + (INTVAL (XEXP (x
, 1)) - 16) / 2;
3126 *total
= COSTS_N_INSNS (10); /* Worst case. */
3129 /* A shift by a big integer takes an extra instruction. */
3130 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
3131 && (INTVAL (XEXP (x
, 1)) == 16))
3133 *total
= COSTS_N_INSNS (2); /* clrw;swap */
3136 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
3137 && !(INTVAL (XEXP (x
, 1)) > 0
3138 && INTVAL (XEXP (x
, 1)) <= 8))
3140 *total
= COSTS_N_INSNS (TARGET_COLDFIRE
? 1 : 3); /* lsr #i,dn */
3146 if ((GET_CODE (XEXP (x
, 0)) == ZERO_EXTEND
3147 || GET_CODE (XEXP (x
, 0)) == SIGN_EXTEND
)
3149 *total
= COSTS_N_INSNS (MULW_COST
);
3150 else if (mode
== QImode
|| mode
== HImode
)
3151 *total
= COSTS_N_INSNS (MULW_COST
);
3153 *total
= COSTS_N_INSNS (MULL_COST
);
3160 if (mode
== QImode
|| mode
== HImode
)
3161 *total
= COSTS_N_INSNS (DIVW_COST
); /* div.w */
3162 else if (TARGET_CF_HWDIV
)
3163 *total
= COSTS_N_INSNS (18);
3165 *total
= COSTS_N_INSNS (43); /* div.l */
3169 if (GET_RTX_CLASS (outer_code
) == RTX_COMPARE
3170 || GET_RTX_CLASS (outer_code
) == RTX_COMM_COMPARE
)
3179 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
3183 output_move_const_into_data_reg (rtx
*operands
)
3187 i
= INTVAL (operands
[1]);
3188 switch (m68k_const_method (i
))
3191 return "mvzw %1,%0";
3193 return "mvsw %1,%0";
3195 return "moveq %1,%0";
3198 operands
[1] = GEN_INT (i
^ 0xff);
3199 return "moveq %1,%0\n\tnot%.b %0";
3202 operands
[1] = GEN_INT (i
^ 0xffff);
3203 return "moveq %1,%0\n\tnot%.w %0";
3206 return "moveq #-128,%0\n\tneg%.w %0";
3211 operands
[1] = GEN_INT ((u
<< 16) | (u
>> 16));
3212 return "moveq %1,%0\n\tswap %0";
3215 return "move%.l %1,%0";
3221 /* Return true if I can be handled by ISA B's mov3q instruction. */
3224 valid_mov3q_const (HOST_WIDE_INT i
)
3226 return TARGET_ISAB
&& (i
== -1 || IN_RANGE (i
, 1, 7));
3229 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
3230 I is the value of OPERANDS[1]. */
3233 output_move_simode_const (rtx
*operands
)
3239 src
= INTVAL (operands
[1]);
3241 && (DATA_REG_P (dest
) || MEM_P (dest
))
3242 /* clr insns on 68000 read before writing. */
3243 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3244 || !(MEM_P (dest
) && MEM_VOLATILE_P (dest
))))
3246 else if (GET_MODE (dest
) == SImode
&& valid_mov3q_const (src
))
3247 return "mov3q%.l %1,%0";
3248 else if (src
== 0 && ADDRESS_REG_P (dest
))
3249 return "sub%.l %0,%0";
3250 else if (DATA_REG_P (dest
))
3251 return output_move_const_into_data_reg (operands
);
3252 else if (ADDRESS_REG_P (dest
) && IN_RANGE (src
, -0x8000, 0x7fff))
3254 if (valid_mov3q_const (src
))
3255 return "mov3q%.l %1,%0";
3256 return "move%.w %1,%0";
3258 else if (MEM_P (dest
)
3259 && GET_CODE (XEXP (dest
, 0)) == PRE_DEC
3260 && REGNO (XEXP (XEXP (dest
, 0), 0)) == STACK_POINTER_REGNUM
3261 && IN_RANGE (src
, -0x8000, 0x7fff))
3263 if (valid_mov3q_const (src
))
3264 return "mov3q%.l %1,%-";
3267 return "move%.l %1,%0";
3271 output_move_simode (rtx
*operands
)
3273 handle_flags_for_move (operands
);
3275 if (GET_CODE (operands
[1]) == CONST_INT
)
3276 return output_move_simode_const (operands
);
3277 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3278 || GET_CODE (operands
[1]) == CONST
)
3279 && push_operand (operands
[0], SImode
))
3281 else if ((GET_CODE (operands
[1]) == SYMBOL_REF
3282 || GET_CODE (operands
[1]) == CONST
)
3283 && ADDRESS_REG_P (operands
[0]))
3284 return "lea %a1,%0";
3285 return "move%.l %1,%0";
3289 output_move_himode (rtx
*operands
)
3291 if (GET_CODE (operands
[1]) == CONST_INT
)
3293 if (operands
[1] == const0_rtx
3294 && (DATA_REG_P (operands
[0])
3295 || GET_CODE (operands
[0]) == MEM
)
3296 /* clr insns on 68000 read before writing. */
3297 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3298 || !(GET_CODE (operands
[0]) == MEM
3299 && MEM_VOLATILE_P (operands
[0]))))
3301 else if (operands
[1] == const0_rtx
3302 && ADDRESS_REG_P (operands
[0]))
3303 return "sub%.l %0,%0";
3304 else if (DATA_REG_P (operands
[0])
3305 && INTVAL (operands
[1]) < 128
3306 && INTVAL (operands
[1]) >= -128)
3307 return "moveq %1,%0";
3308 else if (INTVAL (operands
[1]) < 0x8000
3309 && INTVAL (operands
[1]) >= -0x8000)
3310 return "move%.w %1,%0";
3312 else if (CONSTANT_P (operands
[1]))
3314 return "move%.w %1,%0";
3318 output_move_qimode (rtx
*operands
)
3320 handle_flags_for_move (operands
);
3322 /* 68k family always modifies the stack pointer by at least 2, even for
3323 byte pushes. The 5200 (ColdFire) does not do this. */
3325 /* This case is generated by pushqi1 pattern now. */
3326 gcc_assert (!(GET_CODE (operands
[0]) == MEM
3327 && GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
3328 && XEXP (XEXP (operands
[0], 0), 0) == stack_pointer_rtx
3329 && ! ADDRESS_REG_P (operands
[1])
3330 && ! TARGET_COLDFIRE
));
3332 /* clr and st insns on 68000 read before writing. */
3333 if (!ADDRESS_REG_P (operands
[0])
3334 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3335 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3337 if (operands
[1] == const0_rtx
)
3339 if ((!TARGET_COLDFIRE
|| DATA_REG_P (operands
[0]))
3340 && GET_CODE (operands
[1]) == CONST_INT
3341 && (INTVAL (operands
[1]) & 255) == 255)
3347 if (GET_CODE (operands
[1]) == CONST_INT
3348 && DATA_REG_P (operands
[0])
3349 && INTVAL (operands
[1]) < 128
3350 && INTVAL (operands
[1]) >= -128)
3351 return "moveq %1,%0";
3352 if (operands
[1] == const0_rtx
&& ADDRESS_REG_P (operands
[0]))
3353 return "sub%.l %0,%0";
3354 if (GET_CODE (operands
[1]) != CONST_INT
&& CONSTANT_P (operands
[1]))
3356 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3357 from address registers. */
3358 if (ADDRESS_REG_P (operands
[0]) || ADDRESS_REG_P (operands
[1]))
3360 if (ADDRESS_REG_P (operands
[1]))
3362 return "move%.w %1,%0";
3364 return "move%.b %1,%0";
3368 output_move_stricthi (rtx
*operands
)
3370 if (operands
[1] == const0_rtx
3371 /* clr insns on 68000 read before writing. */
3372 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3373 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3375 return "move%.w %1,%0";
3379 output_move_strictqi (rtx
*operands
)
3381 if (operands
[1] == const0_rtx
3382 /* clr insns on 68000 read before writing. */
3383 && ((TARGET_68010
|| TARGET_COLDFIRE
)
3384 || !(GET_CODE (operands
[0]) == MEM
&& MEM_VOLATILE_P (operands
[0]))))
3386 return "move%.b %1,%0";
3389 /* Return the best assembler insn template
3390 for moving operands[1] into operands[0] as a fullword. */
3393 singlemove_string (rtx
*operands
)
3395 if (GET_CODE (operands
[1]) == CONST_INT
)
3396 return output_move_simode_const (operands
);
3397 return "move%.l %1,%0";
3401 /* Output assembler or rtl code to perform a doubleword move insn
3402 with operands OPERANDS.
3403 Pointers to 3 helper functions should be specified:
3404 HANDLE_REG_ADJUST to adjust a register by a small value,
3405 HANDLE_COMPADR to compute an address and
3406 HANDLE_MOVSI to move 4 bytes. */
3409 handle_move_double (rtx operands
[2],
3410 void (*handle_reg_adjust
) (rtx
, int),
3411 void (*handle_compadr
) (rtx
[2]),
3412 void (*handle_movsi
) (rtx
[2]))
3416 REGOP
, OFFSOP
, MEMOP
, PUSHOP
, POPOP
, CNSTOP
, RNDOP
3421 rtx addreg0
= 0, addreg1
= 0;
3422 int dest_overlapped_low
= 0;
3423 int size
= GET_MODE_SIZE (GET_MODE (operands
[0]));
3428 /* First classify both operands. */
3430 if (REG_P (operands
[0]))
3432 else if (offsettable_memref_p (operands
[0]))
3434 else if (GET_CODE (XEXP (operands
[0], 0)) == POST_INC
)
3436 else if (GET_CODE (XEXP (operands
[0], 0)) == PRE_DEC
)
3438 else if (GET_CODE (operands
[0]) == MEM
)
3443 if (REG_P (operands
[1]))
3445 else if (CONSTANT_P (operands
[1]))
3447 else if (offsettable_memref_p (operands
[1]))
3449 else if (GET_CODE (XEXP (operands
[1], 0)) == POST_INC
)
3451 else if (GET_CODE (XEXP (operands
[1], 0)) == PRE_DEC
)
3453 else if (GET_CODE (operands
[1]) == MEM
)
3458 /* Check for the cases that the operand constraints are not supposed
3459 to allow to happen. Generating code for these cases is
3461 gcc_assert (optype0
!= RNDOP
&& optype1
!= RNDOP
);
3463 /* If one operand is decrementing and one is incrementing
3464 decrement the former register explicitly
3465 and change that operand into ordinary indexing. */
3467 if (optype0
== PUSHOP
&& optype1
== POPOP
)
3469 operands
[0] = XEXP (XEXP (operands
[0], 0), 0);
3471 handle_reg_adjust (operands
[0], -size
);
3473 if (GET_MODE (operands
[1]) == XFmode
)
3474 operands
[0] = gen_rtx_MEM (XFmode
, operands
[0]);
3475 else if (GET_MODE (operands
[0]) == DFmode
)
3476 operands
[0] = gen_rtx_MEM (DFmode
, operands
[0]);
3478 operands
[0] = gen_rtx_MEM (DImode
, operands
[0]);
3481 if (optype0
== POPOP
&& optype1
== PUSHOP
)
3483 operands
[1] = XEXP (XEXP (operands
[1], 0), 0);
3485 handle_reg_adjust (operands
[1], -size
);
3487 if (GET_MODE (operands
[1]) == XFmode
)
3488 operands
[1] = gen_rtx_MEM (XFmode
, operands
[1]);
3489 else if (GET_MODE (operands
[1]) == DFmode
)
3490 operands
[1] = gen_rtx_MEM (DFmode
, operands
[1]);
3492 operands
[1] = gen_rtx_MEM (DImode
, operands
[1]);
3496 /* If an operand is an unoffsettable memory ref, find a register
3497 we can increment temporarily to make it refer to the second word. */
3499 if (optype0
== MEMOP
)
3500 addreg0
= find_addr_reg (XEXP (operands
[0], 0));
3502 if (optype1
== MEMOP
)
3503 addreg1
= find_addr_reg (XEXP (operands
[1], 0));
3505 /* Ok, we can do one word at a time.
3506 Normally we do the low-numbered word first,
3507 but if either operand is autodecrementing then we
3508 do the high-numbered word first.
3510 In either case, set up in LATEHALF the operands to use
3511 for the high-numbered word and in some cases alter the
3512 operands in OPERANDS to be suitable for the low-numbered word. */
3516 if (optype0
== REGOP
)
3518 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 2);
3519 middlehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3521 else if (optype0
== OFFSOP
)
3523 middlehalf
[0] = adjust_address (operands
[0], SImode
, 4);
3524 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3528 middlehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3529 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3532 if (optype1
== REGOP
)
3534 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 2);
3535 middlehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3537 else if (optype1
== OFFSOP
)
3539 middlehalf
[1] = adjust_address (operands
[1], SImode
, 4);
3540 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3542 else if (optype1
== CNSTOP
)
3544 if (GET_CODE (operands
[1]) == CONST_DOUBLE
)
3548 REAL_VALUE_TO_TARGET_LONG_DOUBLE
3549 (*CONST_DOUBLE_REAL_VALUE (operands
[1]), l
);
3550 operands
[1] = GEN_INT (l
[0]);
3551 middlehalf
[1] = GEN_INT (l
[1]);
3552 latehalf
[1] = GEN_INT (l
[2]);
3556 /* No non-CONST_DOUBLE constant should ever appear
3558 gcc_assert (!CONSTANT_P (operands
[1]));
3563 middlehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3564 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3568 /* size is not 12: */
3570 if (optype0
== REGOP
)
3571 latehalf
[0] = gen_rtx_REG (SImode
, REGNO (operands
[0]) + 1);
3572 else if (optype0
== OFFSOP
)
3573 latehalf
[0] = adjust_address (operands
[0], SImode
, size
- 4);
3575 latehalf
[0] = adjust_address (operands
[0], SImode
, 0);
3577 if (optype1
== REGOP
)
3578 latehalf
[1] = gen_rtx_REG (SImode
, REGNO (operands
[1]) + 1);
3579 else if (optype1
== OFFSOP
)
3580 latehalf
[1] = adjust_address (operands
[1], SImode
, size
- 4);
3581 else if (optype1
== CNSTOP
)
3582 split_double (operands
[1], &operands
[1], &latehalf
[1]);
3584 latehalf
[1] = adjust_address (operands
[1], SImode
, 0);
3587 /* If insn is effectively movd N(REG),-(REG) then we will do the high
3588 word first. We should use the adjusted operand 1 (which is N+4(REG))
3589 for the low word as well, to compensate for the first decrement of
3591 if (optype0
== PUSHOP
3592 && reg_overlap_mentioned_p (XEXP (XEXP (operands
[0], 0), 0), operands
[1]))
3593 operands
[1] = middlehalf
[1] = latehalf
[1];
3595 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3596 if the upper part of reg N does not appear in the MEM, arrange to
3597 emit the move late-half first. Otherwise, compute the MEM address
3598 into the upper part of N and use that as a pointer to the memory
3600 if (optype0
== REGOP
3601 && (optype1
== OFFSOP
|| optype1
== MEMOP
))
3603 rtx testlow
= gen_rtx_REG (SImode
, REGNO (operands
[0]));
3605 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3606 && reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3608 /* If both halves of dest are used in the src memory address,
3609 compute the address into latehalf of dest.
3610 Note that this can't happen if the dest is two data regs. */
3612 xops
[0] = latehalf
[0];
3613 xops
[1] = XEXP (operands
[1], 0);
3615 handle_compadr (xops
);
3616 if (GET_MODE (operands
[1]) == XFmode
)
3618 operands
[1] = gen_rtx_MEM (XFmode
, latehalf
[0]);
3619 middlehalf
[1] = adjust_address (operands
[1], DImode
, size
- 8);
3620 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3624 operands
[1] = gen_rtx_MEM (DImode
, latehalf
[0]);
3625 latehalf
[1] = adjust_address (operands
[1], DImode
, size
- 4);
3629 && reg_overlap_mentioned_p (middlehalf
[0],
3630 XEXP (operands
[1], 0)))
3632 /* Check for two regs used by both source and dest.
3633 Note that this can't happen if the dest is all data regs.
3634 It can happen if the dest is d6, d7, a0.
3635 But in that case, latehalf is an addr reg, so
3636 the code at compadr does ok. */
3638 if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0))
3639 || reg_overlap_mentioned_p (latehalf
[0], XEXP (operands
[1], 0)))
3642 /* JRV says this can't happen: */
3643 gcc_assert (!addreg0
&& !addreg1
);
3645 /* Only the middle reg conflicts; simply put it last. */
3646 handle_movsi (operands
);
3647 handle_movsi (latehalf
);
3648 handle_movsi (middlehalf
);
3652 else if (reg_overlap_mentioned_p (testlow
, XEXP (operands
[1], 0)))
3653 /* If the low half of dest is mentioned in the source memory
3654 address, the arrange to emit the move late half first. */
3655 dest_overlapped_low
= 1;
3658 /* If one or both operands autodecrementing,
3659 do the two words, high-numbered first. */
3661 /* Likewise, the first move would clobber the source of the second one,
3662 do them in the other order. This happens only for registers;
3663 such overlap can't happen in memory unless the user explicitly
3664 sets it up, and that is an undefined circumstance. */
3666 if (optype0
== PUSHOP
|| optype1
== PUSHOP
3667 || (optype0
== REGOP
&& optype1
== REGOP
3668 && ((middlehalf
[1] && REGNO (operands
[0]) == REGNO (middlehalf
[1]))
3669 || REGNO (operands
[0]) == REGNO (latehalf
[1])))
3670 || dest_overlapped_low
)
3672 /* Make any unoffsettable addresses point at high-numbered word. */
3674 handle_reg_adjust (addreg0
, size
- 4);
3676 handle_reg_adjust (addreg1
, size
- 4);
3679 handle_movsi (latehalf
);
3681 /* Undo the adds we just did. */
3683 handle_reg_adjust (addreg0
, -4);
3685 handle_reg_adjust (addreg1
, -4);
3689 handle_movsi (middlehalf
);
3692 handle_reg_adjust (addreg0
, -4);
3694 handle_reg_adjust (addreg1
, -4);
3697 /* Do low-numbered word. */
3699 handle_movsi (operands
);
3703 /* Normal case: do the two words, low-numbered first. */
3705 m68k_final_prescan_insn (NULL
, operands
, 2);
3706 handle_movsi (operands
);
3708 /* Do the middle one of the three words for long double */
3712 handle_reg_adjust (addreg0
, 4);
3714 handle_reg_adjust (addreg1
, 4);
3716 m68k_final_prescan_insn (NULL
, middlehalf
, 2);
3717 handle_movsi (middlehalf
);
3720 /* Make any unoffsettable addresses point at high-numbered word. */
3722 handle_reg_adjust (addreg0
, 4);
3724 handle_reg_adjust (addreg1
, 4);
3727 m68k_final_prescan_insn (NULL
, latehalf
, 2);
3728 handle_movsi (latehalf
);
3730 /* Undo the adds we just did. */
3732 handle_reg_adjust (addreg0
, -(size
- 4));
3734 handle_reg_adjust (addreg1
, -(size
- 4));
3739 /* Output assembler code to adjust REG by N. */
3741 output_reg_adjust (rtx reg
, int n
)
3745 gcc_assert (GET_MODE (reg
) == SImode
&& n
>= -12 && n
!= 0 && n
<= 12);
3750 s
= "add%.l #12,%0";
3754 s
= "addq%.l #8,%0";
3758 s
= "addq%.l #4,%0";
3762 s
= "sub%.l #12,%0";
3766 s
= "subq%.l #8,%0";
3770 s
= "subq%.l #4,%0";
3778 output_asm_insn (s
, ®
);
3781 /* Emit rtl code to adjust REG by N. */
3783 emit_reg_adjust (rtx reg1
, int n
)
3787 gcc_assert (GET_MODE (reg1
) == SImode
&& n
>= -12 && n
!= 0 && n
<= 12);
3789 reg1
= copy_rtx (reg1
);
3790 reg2
= copy_rtx (reg1
);
3793 emit_insn (gen_subsi3 (reg1
, reg2
, GEN_INT (-n
)));
3795 emit_insn (gen_addsi3 (reg1
, reg2
, GEN_INT (n
)));
3800 /* Output assembler to load address OPERANDS[0] to register OPERANDS[1]. */
3802 output_compadr (rtx operands
[2])
3804 output_asm_insn ("lea %a1,%0", operands
);
3807 /* Output the best assembler insn for moving operands[1] into operands[0]
3810 output_movsi (rtx operands
[2])
3812 output_asm_insn (singlemove_string (operands
), operands
);
3815 /* Copy OP and change its mode to MODE. */
3817 copy_operand (rtx op
, machine_mode mode
)
3819 /* ??? This looks really ugly. There must be a better way
3820 to change a mode on the operand. */
3821 if (GET_MODE (op
) != VOIDmode
)
3824 op
= gen_rtx_REG (mode
, REGNO (op
));
3828 PUT_MODE (op
, mode
);
3835 /* Emit rtl code for moving operands[1] into operands[0] as a fullword. */
3837 emit_movsi (rtx operands
[2])
3839 operands
[0] = copy_operand (operands
[0], SImode
);
3840 operands
[1] = copy_operand (operands
[1], SImode
);
3842 emit_insn (gen_movsi (operands
[0], operands
[1]));
3845 /* Output assembler code to perform a doubleword move insn
3846 with operands OPERANDS. */
3848 output_move_double (rtx
*operands
)
3850 handle_move_double (operands
,
3851 output_reg_adjust
, output_compadr
, output_movsi
);
3856 /* Output rtl code to perform a doubleword move insn
3857 with operands OPERANDS. */
3859 m68k_emit_move_double (rtx operands
[2])
3861 handle_move_double (operands
, emit_reg_adjust
, emit_movsi
, emit_movsi
);
3864 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3865 new rtx with the correct mode. */
3868 force_mode (machine_mode mode
, rtx orig
)
3870 if (mode
== GET_MODE (orig
))
3873 if (REGNO (orig
) >= FIRST_PSEUDO_REGISTER
)
3876 return gen_rtx_REG (mode
, REGNO (orig
));
3880 fp_reg_operand (rtx op
, machine_mode mode ATTRIBUTE_UNUSED
)
3882 return reg_renumber
&& FP_REG_P (op
);
3885 /* Emit insns to move operands[1] into operands[0].
3887 Return 1 if we have written out everything that needs to be done to
3888 do the move. Otherwise, return 0 and the caller will emit the move
3891 Note SCRATCH_REG may not be in the proper mode depending on how it
3892 will be used. This routine is responsible for creating a new copy
3893 of SCRATCH_REG in the proper mode. */
3896 emit_move_sequence (rtx
*operands
, machine_mode mode
, rtx scratch_reg
)
3898 rtx operand0
= operands
[0];
3899 rtx operand1
= operands
[1];
3903 && (reload_in_progress
|| lra_in_progress
)
3904 && GET_CODE (operand0
) == REG
3905 && REGNO (operand0
) >= FIRST_PSEUDO_REGISTER
)
3906 operand0
= reg_equiv_mem (REGNO (operand0
));
3907 else if (scratch_reg
3908 && (reload_in_progress
|| lra_in_progress
)
3909 && GET_CODE (operand0
) == SUBREG
3910 && GET_CODE (SUBREG_REG (operand0
)) == REG
3911 && REGNO (SUBREG_REG (operand0
)) >= FIRST_PSEUDO_REGISTER
)
3913 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3914 the code which tracks sets/uses for delete_output_reload. */
3915 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand0
),
3916 reg_equiv_mem (REGNO (SUBREG_REG (operand0
))),
3917 SUBREG_BYTE (operand0
));
3918 operand0
= alter_subreg (&temp
, true);
3922 && (reload_in_progress
|| lra_in_progress
)
3923 && GET_CODE (operand1
) == REG
3924 && REGNO (operand1
) >= FIRST_PSEUDO_REGISTER
)
3925 operand1
= reg_equiv_mem (REGNO (operand1
));
3926 else if (scratch_reg
3927 && (reload_in_progress
|| lra_in_progress
)
3928 && GET_CODE (operand1
) == SUBREG
3929 && GET_CODE (SUBREG_REG (operand1
)) == REG
3930 && REGNO (SUBREG_REG (operand1
)) >= FIRST_PSEUDO_REGISTER
)
3932 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
3933 the code which tracks sets/uses for delete_output_reload. */
3934 rtx temp
= gen_rtx_SUBREG (GET_MODE (operand1
),
3935 reg_equiv_mem (REGNO (SUBREG_REG (operand1
))),
3936 SUBREG_BYTE (operand1
));
3937 operand1
= alter_subreg (&temp
, true);
3940 if (scratch_reg
&& (reload_in_progress
|| lra_in_progress
)
3941 && GET_CODE (operand0
) == MEM
3942 && ((tem
= find_replacement (&XEXP (operand0
, 0)))
3943 != XEXP (operand0
, 0)))
3944 operand0
= gen_rtx_MEM (GET_MODE (operand0
), tem
);
3945 if (scratch_reg
&& (reload_in_progress
|| lra_in_progress
)
3946 && GET_CODE (operand1
) == MEM
3947 && ((tem
= find_replacement (&XEXP (operand1
, 0)))
3948 != XEXP (operand1
, 0)))
3949 operand1
= gen_rtx_MEM (GET_MODE (operand1
), tem
);
3951 /* Handle secondary reloads for loads/stores of FP registers where
3952 the address is symbolic by using the scratch register */
3953 if (fp_reg_operand (operand0
, mode
)
3954 && ((GET_CODE (operand1
) == MEM
3955 && ! memory_address_p (DFmode
, XEXP (operand1
, 0)))
3956 || ((GET_CODE (operand1
) == SUBREG
3957 && GET_CODE (XEXP (operand1
, 0)) == MEM
3958 && !memory_address_p (DFmode
, XEXP (XEXP (operand1
, 0), 0)))))
3961 if (GET_CODE (operand1
) == SUBREG
)
3962 operand1
= XEXP (operand1
, 0);
3964 /* SCRATCH_REG will hold an address. We want
3965 it in SImode regardless of what mode it was originally given
3967 scratch_reg
= force_mode (SImode
, scratch_reg
);
3969 /* D might not fit in 14 bits either; for such cases load D into
3971 if (!memory_address_p (Pmode
, XEXP (operand1
, 0)))
3973 emit_move_insn (scratch_reg
, XEXP (XEXP (operand1
, 0), 1));
3974 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1
, 0)),
3976 XEXP (XEXP (operand1
, 0), 0),
3980 emit_move_insn (scratch_reg
, XEXP (operand1
, 0));
3981 emit_insn (gen_rtx_SET (operand0
, gen_rtx_MEM (mode
, scratch_reg
)));
3984 else if (fp_reg_operand (operand1
, mode
)
3985 && ((GET_CODE (operand0
) == MEM
3986 && ! memory_address_p (DFmode
, XEXP (operand0
, 0)))
3987 || ((GET_CODE (operand0
) == SUBREG
)
3988 && GET_CODE (XEXP (operand0
, 0)) == MEM
3989 && !memory_address_p (DFmode
, XEXP (XEXP (operand0
, 0), 0))))
3992 if (GET_CODE (operand0
) == SUBREG
)
3993 operand0
= XEXP (operand0
, 0);
3995 /* SCRATCH_REG will hold an address and maybe the actual data. We want
3996 it in SIMODE regardless of what mode it was originally given
3998 scratch_reg
= force_mode (SImode
, scratch_reg
);
4000 /* D might not fit in 14 bits either; for such cases load D into
4002 if (!memory_address_p (Pmode
, XEXP (operand0
, 0)))
4004 emit_move_insn (scratch_reg
, XEXP (XEXP (operand0
, 0), 1));
4005 emit_move_insn (scratch_reg
, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0
,
4008 XEXP (XEXP (operand0
, 0),
4013 emit_move_insn (scratch_reg
, XEXP (operand0
, 0));
4014 emit_insn (gen_rtx_SET (gen_rtx_MEM (mode
, scratch_reg
), operand1
));
4017 /* Handle secondary reloads for loads of FP registers from constant
4018 expressions by forcing the constant into memory.
4020 use scratch_reg to hold the address of the memory location.
4022 The proper fix is to change PREFERRED_RELOAD_CLASS to return
4023 NO_REGS when presented with a const_int and an register class
4024 containing only FP registers. Doing so unfortunately creates
4025 more problems than it solves. Fix this for 2.5. */
4026 else if (fp_reg_operand (operand0
, mode
)
4027 && CONSTANT_P (operand1
)
4032 /* SCRATCH_REG will hold an address and maybe the actual data. We want
4033 it in SIMODE regardless of what mode it was originally given
4035 scratch_reg
= force_mode (SImode
, scratch_reg
);
4037 /* Force the constant into memory and put the address of the
4038 memory location into scratch_reg. */
4039 xoperands
[0] = scratch_reg
;
4040 xoperands
[1] = XEXP (force_const_mem (mode
, operand1
), 0);
4041 emit_insn (gen_rtx_SET (scratch_reg
, xoperands
[1]));
4043 /* Now load the destination register. */
4044 emit_insn (gen_rtx_SET (operand0
, gen_rtx_MEM (mode
, scratch_reg
)));
4048 /* Now have insn-emit do whatever it normally does. */
4052 /* Split one or more DImode RTL references into pairs of SImode
4053 references. The RTL can be REG, offsettable MEM, integer constant, or
4054 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
4055 split and "num" is its length. lo_half and hi_half are output arrays
4056 that parallel "operands". */
4059 split_di (rtx operands
[], int num
, rtx lo_half
[], rtx hi_half
[])
4063 rtx op
= operands
[num
];
4065 /* simplify_subreg refuses to split volatile memory addresses,
4066 but we still have to handle it. */
4067 if (GET_CODE (op
) == MEM
)
4069 lo_half
[num
] = adjust_address (op
, SImode
, 4);
4070 hi_half
[num
] = adjust_address (op
, SImode
, 0);
4074 lo_half
[num
] = simplify_gen_subreg (SImode
, op
,
4075 GET_MODE (op
) == VOIDmode
4076 ? DImode
: GET_MODE (op
), 4);
4077 hi_half
[num
] = simplify_gen_subreg (SImode
, op
,
4078 GET_MODE (op
) == VOIDmode
4079 ? DImode
: GET_MODE (op
), 0);
4084 /* Split X into a base and a constant offset, storing them in *BASE
4085 and *OFFSET respectively. */
4088 m68k_split_offset (rtx x
, rtx
*base
, HOST_WIDE_INT
*offset
)
4091 if (GET_CODE (x
) == PLUS
&& GET_CODE (XEXP (x
, 1)) == CONST_INT
)
4093 *offset
+= INTVAL (XEXP (x
, 1));
4099 /* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
4100 instruction. STORE_P says whether the move is a load or store.
4102 If the instruction uses post-increment or pre-decrement addressing,
4103 AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
4104 adjustment. This adjustment will be made by the first element of
4105 PARALLEL, with the loads or stores starting at element 1. If the
4106 instruction does not use post-increment or pre-decrement addressing,
4107 AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
4108 start at element 0. */
4111 m68k_movem_pattern_p (rtx pattern
, rtx automod_base
,
4112 HOST_WIDE_INT automod_offset
, bool store_p
)
4114 rtx base
, mem_base
, set
, mem
, reg
, last_reg
;
4115 HOST_WIDE_INT offset
, mem_offset
;
4117 enum reg_class rclass
;
4119 len
= XVECLEN (pattern
, 0);
4120 first
= (automod_base
!= NULL
);
4124 /* Stores must be pre-decrement and loads must be post-increment. */
4125 if (store_p
!= (automod_offset
< 0))
4128 /* Work out the base and offset for lowest memory location. */
4129 base
= automod_base
;
4130 offset
= (automod_offset
< 0 ? automod_offset
: 0);
4134 /* Allow any valid base and offset in the first access. */
4141 for (i
= first
; i
< len
; i
++)
4143 /* We need a plain SET. */
4144 set
= XVECEXP (pattern
, 0, i
);
4145 if (GET_CODE (set
) != SET
)
4148 /* Check that we have a memory location... */
4149 mem
= XEXP (set
, !store_p
);
4150 if (!MEM_P (mem
) || !memory_operand (mem
, VOIDmode
))
4153 /* ...with the right address. */
4156 m68k_split_offset (XEXP (mem
, 0), &base
, &offset
);
4157 /* The ColdFire instruction only allows (An) and (d16,An) modes.
4158 There are no mode restrictions for 680x0 besides the
4159 automodification rules enforced above. */
4161 && !m68k_legitimate_base_reg_p (base
, reload_completed
))
4166 m68k_split_offset (XEXP (mem
, 0), &mem_base
, &mem_offset
);
4167 if (!rtx_equal_p (base
, mem_base
) || offset
!= mem_offset
)
4171 /* Check that we have a register of the required mode and class. */
4172 reg
= XEXP (set
, store_p
);
4174 || !HARD_REGISTER_P (reg
)
4175 || GET_MODE (reg
) != reg_raw_mode
[REGNO (reg
)])
4180 /* The register must belong to RCLASS and have a higher number
4181 than the register in the previous SET. */
4182 if (!TEST_HARD_REG_BIT (reg_class_contents
[rclass
], REGNO (reg
))
4183 || REGNO (last_reg
) >= REGNO (reg
))
4188 /* Work out which register class we need. */
4189 if (INT_REGNO_P (REGNO (reg
)))
4190 rclass
= GENERAL_REGS
;
4191 else if (FP_REGNO_P (REGNO (reg
)))
4198 offset
+= GET_MODE_SIZE (GET_MODE (reg
));
4201 /* If we have an automodification, check whether the final offset is OK. */
4202 if (automod_base
&& offset
!= (automod_offset
< 0 ? 0 : automod_offset
))
4205 /* Reject unprofitable cases. */
4206 if (len
< first
+ (rclass
== FP_REGS
? MIN_FMOVEM_REGS
: MIN_MOVEM_REGS
))
4212 /* Return the assembly code template for a movem or fmovem instruction
4213 whose pattern is given by PATTERN. Store the template's operands
4216 If the instruction uses post-increment or pre-decrement addressing,
4217 AUTOMOD_OFFSET is the total adjustment, otherwise it is 0. STORE_P
4218 is true if this is a store instruction. */
4221 m68k_output_movem (rtx
*operands
, rtx pattern
,
4222 HOST_WIDE_INT automod_offset
, bool store_p
)
4227 gcc_assert (GET_CODE (pattern
) == PARALLEL
);
4229 first
= (automod_offset
!= 0);
4230 for (i
= first
; i
< XVECLEN (pattern
, 0); i
++)
4232 /* When using movem with pre-decrement addressing, register X + D0_REG
4233 is controlled by bit 15 - X. For all other addressing modes,
4234 register X + D0_REG is controlled by bit X. Confusingly, the
4235 register mask for fmovem is in the opposite order to that for
4239 gcc_assert (MEM_P (XEXP (XVECEXP (pattern
, 0, i
), !store_p
)));
4240 gcc_assert (REG_P (XEXP (XVECEXP (pattern
, 0, i
), store_p
)));
4241 regno
= REGNO (XEXP (XVECEXP (pattern
, 0, i
), store_p
));
4242 if (automod_offset
< 0)
4244 if (FP_REGNO_P (regno
))
4245 mask
|= 1 << (regno
- FP0_REG
);
4247 mask
|= 1 << (15 - (regno
- D0_REG
));
4251 if (FP_REGNO_P (regno
))
4252 mask
|= 1 << (7 - (regno
- FP0_REG
));
4254 mask
|= 1 << (regno
- D0_REG
);
4259 if (automod_offset
== 0)
4260 operands
[0] = XEXP (XEXP (XVECEXP (pattern
, 0, first
), !store_p
), 0);
4261 else if (automod_offset
< 0)
4262 operands
[0] = gen_rtx_PRE_DEC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4264 operands
[0] = gen_rtx_POST_INC (Pmode
, SET_DEST (XVECEXP (pattern
, 0, 0)));
4265 operands
[1] = GEN_INT (mask
);
4266 if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern
, 0, first
), store_p
))))
4269 return "fmovem %1,%a0";
4271 return "fmovem %a0,%1";
4276 return "movem%.l %1,%a0";
4278 return "movem%.l %a0,%1";
4282 /* Return a REG that occurs in ADDR with coefficient 1.
4283 ADDR can be effectively incremented by incrementing REG. */
4286 find_addr_reg (rtx addr
)
4288 while (GET_CODE (addr
) == PLUS
)
4290 if (GET_CODE (XEXP (addr
, 0)) == REG
)
4291 addr
= XEXP (addr
, 0);
4292 else if (GET_CODE (XEXP (addr
, 1)) == REG
)
4293 addr
= XEXP (addr
, 1);
4294 else if (CONSTANT_P (XEXP (addr
, 0)))
4295 addr
= XEXP (addr
, 1);
4296 else if (CONSTANT_P (XEXP (addr
, 1)))
4297 addr
= XEXP (addr
, 0);
4301 gcc_assert (GET_CODE (addr
) == REG
);
4305 /* Output assembler code to perform a 32-bit 3-operand add. */
4308 output_addsi3 (rtx
*operands
)
4310 if (! operands_match_p (operands
[0], operands
[1]))
4312 if (!ADDRESS_REG_P (operands
[1]))
4314 rtx tmp
= operands
[1];
4316 operands
[1] = operands
[2];
4320 /* These insns can result from reloads to access
4321 stack slots over 64k from the frame pointer. */
4322 if (GET_CODE (operands
[2]) == CONST_INT
4323 && (INTVAL (operands
[2]) < -32768 || INTVAL (operands
[2]) > 32767))
4324 return "move%.l %2,%0\n\tadd%.l %1,%0";
4325 if (GET_CODE (operands
[2]) == REG
)
4326 return MOTOROLA
? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
4327 return MOTOROLA
? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
4329 if (GET_CODE (operands
[2]) == CONST_INT
)
4331 if (INTVAL (operands
[2]) > 0
4332 && INTVAL (operands
[2]) <= 8)
4333 return "addq%.l %2,%0";
4334 if (INTVAL (operands
[2]) < 0
4335 && INTVAL (operands
[2]) >= -8)
4337 operands
[2] = GEN_INT (- INTVAL (operands
[2]));
4338 return "subq%.l %2,%0";
4340 /* On the CPU32 it is faster to use two addql instructions to
4341 add a small integer (8 < N <= 16) to a register.
4342 Likewise for subql. */
4343 if (TUNE_CPU32
&& REG_P (operands
[0]))
4345 if (INTVAL (operands
[2]) > 8
4346 && INTVAL (operands
[2]) <= 16)
4348 operands
[2] = GEN_INT (INTVAL (operands
[2]) - 8);
4349 return "addq%.l #8,%0\n\taddq%.l %2,%0";
4351 if (INTVAL (operands
[2]) < -8
4352 && INTVAL (operands
[2]) >= -16)
4354 operands
[2] = GEN_INT (- INTVAL (operands
[2]) - 8);
4355 return "subq%.l #8,%0\n\tsubq%.l %2,%0";
4358 if (ADDRESS_REG_P (operands
[0])
4359 && INTVAL (operands
[2]) >= -0x8000
4360 && INTVAL (operands
[2]) < 0x8000)
4363 return "add%.w %2,%0";
4365 return MOTOROLA
? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
4368 return "add%.l %2,%0";
4371 /* Emit a comparison between OP0 and OP1. Return true iff the comparison
4372 was reversed. SC1 is an SImode scratch reg, and SC2 a DImode scratch reg,
4373 as needed. CODE is the code of the comparison, we return it unchanged or
4374 swapped, as necessary. */
4376 m68k_output_compare_di (rtx op0
, rtx op1
, rtx sc1
, rtx sc2
, rtx_insn
*insn
,
4384 if (op1
== const0_rtx
)
4386 if (!REG_P (op0
) || ADDRESS_REG_P (op0
))
4392 output_move_double (xoperands
);
4393 output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", xoperands
);
4394 return swap_condition (code
);
4396 if (find_reg_note (insn
, REG_DEAD
, op0
))
4398 output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", ops
);
4399 return swap_condition (code
);
4403 /* 'sub' clears %1, and also clears the X cc bit.
4404 'tst' sets the Z cc bit according to the low part of the DImode
4406 'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high
4408 output_asm_insn ("sub%.l %2,%2\n\ttst%.l %R0\n\tsubx%.l %2,%0", ops
);
4413 if (rtx_equal_p (sc2
, op0
))
4415 output_asm_insn ("sub%.l %R1,%R3\n\tsubx%.l %1,%3", ops
);
4420 output_asm_insn ("sub%.l %R0,%R3\n\tsubx%.l %0,%3", ops
);
4421 return swap_condition (code
);
4426 remember_compare_flags (rtx op0
, rtx op1
)
4428 if (side_effects_p (op0
) || side_effects_p (op1
))
4432 flags_compare_op0
= op0
;
4433 flags_compare_op1
= op1
;
4434 flags_operand1
= flags_operand2
= NULL_RTX
;
4435 flags_valid
= FLAGS_VALID_SET
;
4439 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4440 comparison. It is returned, potentially modified if necessary. */
4442 m68k_output_compare_si (rtx op0
, rtx op1
, rtx_code code
)
4444 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4448 remember_compare_flags (op0
, op1
);
4453 if (op1
== const0_rtx
&& (TARGET_68020
|| TARGET_COLDFIRE
|| !ADDRESS_REG_P (op0
)))
4454 output_asm_insn ("tst%.l %0", ops
);
4455 else if (GET_CODE (op0
) == MEM
&& GET_CODE (op1
) == MEM
)
4456 output_asm_insn ("cmpm%.l %1,%0", ops
);
4457 else if (REG_P (op1
)
4458 || (!REG_P (op0
) && GET_CODE (op0
) != MEM
))
4460 output_asm_insn ("cmp%.l %d0,%d1", ops
);
4461 std::swap (flags_compare_op0
, flags_compare_op1
);
4462 return swap_condition (code
);
4464 else if (!TARGET_COLDFIRE
4465 && ADDRESS_REG_P (op0
)
4466 && GET_CODE (op1
) == CONST_INT
4467 && INTVAL (op1
) < 0x8000
4468 && INTVAL (op1
) >= -0x8000)
4469 output_asm_insn ("cmp%.w %1,%0", ops
);
4471 output_asm_insn ("cmp%.l %d1,%d0", ops
);
4475 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4476 comparison. It is returned, potentially modified if necessary. */
4478 m68k_output_compare_hi (rtx op0
, rtx op1
, rtx_code code
)
4480 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4484 remember_compare_flags (op0
, op1
);
4489 if (op1
== const0_rtx
)
4490 output_asm_insn ("tst%.w %d0", ops
);
4491 else if (GET_CODE (op0
) == MEM
&& GET_CODE (op1
) == MEM
)
4492 output_asm_insn ("cmpm%.w %1,%0", ops
);
4493 else if ((REG_P (op1
) && !ADDRESS_REG_P (op1
))
4494 || (!REG_P (op0
) && GET_CODE (op0
) != MEM
))
4496 output_asm_insn ("cmp%.w %d0,%d1", ops
);
4497 std::swap (flags_compare_op0
, flags_compare_op1
);
4498 return swap_condition (code
);
4501 output_asm_insn ("cmp%.w %d1,%d0", ops
);
4505 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4506 comparison. It is returned, potentially modified if necessary. */
4508 m68k_output_compare_qi (rtx op0
, rtx op1
, rtx_code code
)
4510 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4514 remember_compare_flags (op0
, op1
);
4519 if (op1
== const0_rtx
)
4520 output_asm_insn ("tst%.b %d0", ops
);
4521 else if (GET_CODE (op0
) == MEM
&& GET_CODE (op1
) == MEM
)
4522 output_asm_insn ("cmpm%.b %1,%0", ops
);
4523 else if (REG_P (op1
) || (!REG_P (op0
) && GET_CODE (op0
) != MEM
))
4525 output_asm_insn ("cmp%.b %d0,%d1", ops
);
4526 std::swap (flags_compare_op0
, flags_compare_op1
);
4527 return swap_condition (code
);
4530 output_asm_insn ("cmp%.b %d1,%d0", ops
);
4534 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4535 comparison. It is returned, potentially modified if necessary. */
4537 m68k_output_compare_fp (rtx op0
, rtx op1
, rtx_code code
)
4539 rtx_code tmp
= m68k_find_flags_value (op0
, op1
, code
);
4547 remember_compare_flags (op0
, op1
);
4549 machine_mode mode
= GET_MODE (op0
);
4550 std::string prec
= mode
== SFmode
? "s" : mode
== DFmode
? "d" : "x";
4552 if (op1
== CONST0_RTX (GET_MODE (op0
)))
4556 if (TARGET_COLDFIRE_FPU
)
4557 output_asm_insn ("ftst%.d %0", ops
);
4559 output_asm_insn ("ftst%.x %0", ops
);
4562 output_asm_insn (("ftst%." + prec
+ " %0").c_str (), ops
);
4566 switch (which_alternative
)
4569 if (TARGET_COLDFIRE_FPU
)
4570 output_asm_insn ("fcmp%.d %1,%0", ops
);
4572 output_asm_insn ("fcmp%.x %1,%0", ops
);
4575 output_asm_insn (("fcmp%." + prec
+ " %f1,%0").c_str (), ops
);
4578 output_asm_insn (("fcmp%." + prec
+ " %0,%f1").c_str (), ops
);
4579 std::swap (flags_compare_op0
, flags_compare_op1
);
4580 return swap_condition (code
);
4582 /* This is the ftst case, handled earlier. */
4588 /* Return an output template for a branch with CODE. */
/* NOTE(review): only the signature of this function survives in this
   extract -- its body (presumably a switch mapping CODE to a jump
   mnemonic template) was lost.  Recover it from version control before
   editing.  */
4590 m68k_output_branch_integer (rtx_code code
)
4623 /* Return an output template for a reversed branch with CODE. */
/* NOTE(review): body lost in extraction -- presumably the same switch as
   m68k_output_branch_integer with each condition reversed.  Recover from
   version control before editing.  */
4625 m68k_output_branch_integer_rev (rtx_code code
)
4658 /* Return an output template for a scc instruction with CODE. */
/* NOTE(review): body lost in extraction -- presumably a switch mapping
   CODE to an scc mnemonic template.  Recover from version control before
   editing.  */
4660 m68k_output_scc (rtx_code code
)
4693 /* Return an output template for a floating point branch
4694 instruction with CODE. */
/* NOTE(review): body lost in extraction -- presumably a switch mapping
   CODE to an fbcc mnemonic template.  Recover from version control before
   editing.  */
4696 m68k_output_branch_float (rtx_code code
)
4733 /* Return an output template for a reversed floating point branch
4734 instruction with CODE. */
/* NOTE(review): body lost in extraction -- presumably the reversed
   counterpart of m68k_output_branch_float.  Recover from version control
   before editing.  */
4736 m68k_output_branch_float_rev (rtx_code code
)
4773 /* Return an output template for a floating point scc
4774 instruction with CODE. */
/* NOTE(review): body lost in extraction -- presumably a switch mapping
   CODE to an fscc mnemonic template.  Recover from version control before
   editing.  */
4776 m68k_output_scc_float (rtx_code code
)
4816 output_move_const_double (rtx
*operands
)
4818 int code
= standard_68881_constant_p (operands
[1]);
4822 static char buf
[40];
4824 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4827 return "fmove%.d %1,%0";
4831 output_move_const_single (rtx
*operands
)
4833 int code
= standard_68881_constant_p (operands
[1]);
4837 static char buf
[40];
4839 sprintf (buf
, "fmovecr #0x%x,%%0", code
& 0xff);
4842 return "fmove%.s %f1,%0";
4845 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4846 from the "fmovecr" instruction.
4847 The value, anded with 0xff, gives the code to use in fmovecr
4848 to get the desired constant. */
4850 /* This code has been fixed for cross-compilation. */
/* Nonzero once values_68881 has been filled in by init_68881_table.  */
4852 static int inited_68881_table
= 0;
/* Decimal spellings of the seven constants available in the 68881's
   on-chip constant ROM.  NOTE(review): the initializer contents were lost
   in extraction -- restore them from version control.  */
4854 static const char *const strings_68881
[7] = {
/* fmovecr ROM codes parallel to strings_68881.  NOTE(review):
   initializer contents lost in extraction.  */
4864 static const int codes_68881
[7] = {
/* Binary values of strings_68881, computed lazily by init_68881_table.  */
4874 REAL_VALUE_TYPE values_68881
[7];
4876 /* Set up values_68881 array by converting the decimal values
4877 strings_68881 to binary. */
/* NOTE(review): the local declarations and the per-entry mode selection
   were lost in extraction; only the conversion loop survives.  Recover
   the full body from version control before editing.  */
4880 init_68881_table (void)
/* Convert each decimal string into its REAL_VALUE_TYPE equivalent.  */
4887 for (i
= 0; i
< 7; i
++)
4891 r
= REAL_VALUE_ATOF (strings_68881
[i
], mode
);
4892 values_68881
[i
] = r
;
/* Mark the table initialized so this work happens only once.  */
4894 inited_68881_table
= 1;
4898 standard_68881_constant_p (rtx x
)
4900 const REAL_VALUE_TYPE
*r
;
4903 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4904 used at all on those chips. */
4908 if (! inited_68881_table
)
4909 init_68881_table ();
4911 r
= CONST_DOUBLE_REAL_VALUE (x
);
4913 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4914 for (i
= 0; i
< 6; i
++)
4916 if (real_identical (r
, &values_68881
[i
]))
4917 return (codes_68881
[i
]);
4920 if (GET_MODE (x
) == SFmode
)
4923 if (real_equal (r
, &values_68881
[6]))
4924 return (codes_68881
[6]);
4926 /* larger powers of ten in the constants ram are not used
4927 because they are not equal to a `double' C constant. */
4931 /* If X is a floating-point constant, return the logarithm of X base 2,
4932 or 0 if X is not a power of 2. */
4935 floating_exact_log2 (rtx x
)
4937 const REAL_VALUE_TYPE
*r
;
4941 r
= CONST_DOUBLE_REAL_VALUE (x
);
4943 if (real_less (r
, &dconst1
))
4946 exp
= real_exponent (r
);
4947 real_2expN (&r1
, exp
, DFmode
);
4948 if (real_equal (&r1
, r
))
4954 /* A C compound statement to output to stdio stream STREAM the
4955 assembler syntax for an instruction operand X. X is an RTL
4958 CODE is a value that can be used to specify one of several ways
4959 of printing the operand. It is used when identical operands
4960 must be printed differently depending on the context. CODE
4961 comes from the `%' specification that was used to request
4962 printing of the operand. If the specification was just `%DIGIT'
4963 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4964 is the ASCII code for LTR.
4966 If X is a register, this macro should print the register's name.
4967 The names can be found in an array `reg_names' whose type is
4968 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4970 When the machine description has a specification `%PUNCT' (a `%'
4971 followed by a punctuation character), this macro is called with
4972 a null pointer for X and the punctuation character for CODE.
4974 The m68k specific codes are:
4976 '.' for dot needed in Motorola-style opcode names.
4977 '-' for an operand pushing on the stack:
4978 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4979 '+' for an operand pushing on the stack:
4980 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4981 '@' for a reference to the top word on the stack:
4982 sp@, (sp) or (%sp) depending on the style of syntax.
4983 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4984 but & in SGS syntax).
4985 '!' for the cc register (used in an `and to cc' insn).
4986 '$' for the letter `s' in an op code, but only on the 68040.
4987 '&' for the letter `d' in an op code, but only on the 68040.
4988 '/' for register prefix needed by longlong.h.
4989 '?' for m68k_library_id_string
4991 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4992 'd' to force memory addressing to be absolute, not relative.
4993 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4994 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4995 or print pair of registers as rx:ry.
4996 'p' print an address with @PLTPC attached, but only if the operand
4997 is not locally-bound. */
5000 print_operand (FILE *file
, rtx op
, int letter
)
5003 m68k_adjust_decorated_operand (op
);
5008 fprintf (file
, ".");
5010 else if (letter
== '#')
5011 asm_fprintf (file
, "%I");
5012 else if (letter
== '-')
5013 asm_fprintf (file
, MOTOROLA
? "-(%Rsp)" : "%Rsp@-");
5014 else if (letter
== '+')
5015 asm_fprintf (file
, MOTOROLA
? "(%Rsp)+" : "%Rsp@+");
5016 else if (letter
== '@')
5017 asm_fprintf (file
, MOTOROLA
? "(%Rsp)" : "%Rsp@");
5018 else if (letter
== '!')
5019 asm_fprintf (file
, "%Rfpcr");
5020 else if (letter
== '$')
5023 fprintf (file
, "s");
5025 else if (letter
== '&')
5028 fprintf (file
, "d");
5030 else if (letter
== '/')
5031 asm_fprintf (file
, "%R");
5032 else if (letter
== '?')
5033 asm_fprintf (file
, m68k_library_id_string
);
5034 else if (letter
== 'p')
5036 output_addr_const (file
, op
);
5037 if (!(GET_CODE (op
) == SYMBOL_REF
&& SYMBOL_REF_LOCAL_P (op
)))
5038 fprintf (file
, "@PLTPC");
5040 else if (GET_CODE (op
) == REG
)
5043 /* Print out the second register name of a register pair.
5044 I.e., R (6) => 7. */
5045 fputs (M68K_REGNAME(REGNO (op
) + 1), file
);
5047 fputs (M68K_REGNAME(REGNO (op
)), file
);
5049 else if (GET_CODE (op
) == MEM
)
5051 output_address (GET_MODE (op
), XEXP (op
, 0));
5052 if (letter
== 'd' && ! TARGET_68020
5053 && CONSTANT_ADDRESS_P (XEXP (op
, 0))
5054 && !(GET_CODE (XEXP (op
, 0)) == CONST_INT
5055 && INTVAL (XEXP (op
, 0)) < 0x8000
5056 && INTVAL (XEXP (op
, 0)) >= -0x8000))
5057 fprintf (file
, MOTOROLA
? ".l" : ":l");
5059 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == SFmode
)
5062 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5063 asm_fprintf (file
, "%I0x%lx", l
& 0xFFFFFFFF);
5065 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == XFmode
)
5068 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5069 asm_fprintf (file
, "%I0x%lx%08lx%08lx", l
[0] & 0xFFFFFFFF,
5070 l
[1] & 0xFFFFFFFF, l
[2] & 0xFFFFFFFF);
5072 else if (GET_CODE (op
) == CONST_DOUBLE
&& GET_MODE (op
) == DFmode
)
5075 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op
), l
);
5076 asm_fprintf (file
, "%I0x%lx%08lx", l
[0] & 0xFFFFFFFF, l
[1] & 0xFFFFFFFF);
5080 /* Use `print_operand_address' instead of `output_addr_const'
5081 to ensure that we print relevant PIC stuff. */
5082 asm_fprintf (file
, "%I");
5084 && (GET_CODE (op
) == SYMBOL_REF
|| GET_CODE (op
) == CONST
))
5085 print_operand_address (file
, op
);
5087 output_addr_const (file
, op
);
5091 /* Return string for TLS relocation RELOC. */
/* NOTE(review): most of this function's body (the switch over the
   relocation kinds and its return statements) was lost in extraction;
   only the assertion and one condition survive.  Recover the full body
   from version control before editing.  */
5094 m68k_get_reloc_decoration (enum m68k_reloc reloc
)
5096 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
5097 gcc_assert (MOTOROLA
|| reloc
== RELOC_GOT
);
/* Small-model PIC on 68020+ uses the short GOT form below.  */
5104 if (flag_pic
== 1 && TARGET_68020
)
5146 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
5149 m68k_output_addr_const_extra (FILE *file
, rtx x
)
5151 if (GET_CODE (x
) == UNSPEC
)
5153 switch (XINT (x
, 1))
5155 case UNSPEC_RELOC16
:
5156 case UNSPEC_RELOC32
:
5157 output_addr_const (file
, XVECEXP (x
, 0, 0));
5158 fputs (m68k_get_reloc_decoration
5159 ((enum m68k_reloc
) INTVAL (XVECEXP (x
, 0, 1))), file
);
5170 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
5173 m68k_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
5175 gcc_assert (size
== 4);
5176 fputs ("\t.long\t", file
);
5177 output_addr_const (file
, x
);
5178 fputs ("@TLSLDO+0x8000", file
);
5181 /* In the name of slightly smaller debug output, and to cater to
5182 general assembler lossage, recognize various UNSPEC sequences
5183 and turn them back into a direct symbol reference. */
5186 m68k_delegitimize_address (rtx orig_x
)
5189 struct m68k_address addr
;
5192 orig_x
= delegitimize_mem_from_attrs (orig_x
);
5197 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
5200 if (!m68k_decompose_address (GET_MODE (x
), x
, false, &addr
)
5201 || addr
.offset
== NULL_RTX
5202 || GET_CODE (addr
.offset
) != CONST
)
5205 unspec
= XEXP (addr
.offset
, 0);
5206 if (GET_CODE (unspec
) == PLUS
&& CONST_INT_P (XEXP (unspec
, 1)))
5207 unspec
= XEXP (unspec
, 0);
5208 if (GET_CODE (unspec
) != UNSPEC
5209 || (XINT (unspec
, 1) != UNSPEC_RELOC16
5210 && XINT (unspec
, 1) != UNSPEC_RELOC32
))
5212 x
= XVECEXP (unspec
, 0, 0);
5213 gcc_assert (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
);
5214 if (unspec
!= XEXP (addr
.offset
, 0))
5215 x
= gen_rtx_PLUS (Pmode
, x
, XEXP (XEXP (addr
.offset
, 0), 1));
5218 rtx idx
= addr
.index
;
5219 if (addr
.scale
!= 1)
5220 idx
= gen_rtx_MULT (Pmode
, idx
, GEN_INT (addr
.scale
));
5221 x
= gen_rtx_PLUS (Pmode
, idx
, x
);
5224 x
= gen_rtx_PLUS (Pmode
, addr
.base
, x
);
5226 x
= replace_equiv_address_nv (orig_x
, x
);
5231 /* A C compound statement to output to stdio stream STREAM the
5232 assembler syntax for an instruction operand that is a memory
5233 reference whose address is ADDR. ADDR is an RTL expression.
5235 Note that this contains a kludge that knows that the only reason
5236 we have an address (plus (label_ref...) (reg...)) when not generating
5237 PIC code is in the insn before a tablejump, and we know that m68k.md
5238 generates a label LInnn: on such an insn.
5240 It is possible for PIC to generate a (plus (label_ref...) (reg...))
5241 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
5243 This routine is responsible for distinguishing between -fpic and -fPIC
5244 style relocations in an address. When generating -fpic code the
5245 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
5246 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
5249 print_operand_address (FILE *file
, rtx addr
)
5251 struct m68k_address address
;
5253 m68k_adjust_decorated_operand (addr
);
5255 if (!m68k_decompose_address (QImode
, addr
, true, &address
))
5258 if (address
.code
== PRE_DEC
)
5259 fprintf (file
, MOTOROLA
? "-(%s)" : "%s@-",
5260 M68K_REGNAME (REGNO (address
.base
)));
5261 else if (address
.code
== POST_INC
)
5262 fprintf (file
, MOTOROLA
? "(%s)+" : "%s@+",
5263 M68K_REGNAME (REGNO (address
.base
)));
5264 else if (!address
.base
&& !address
.index
)
5266 /* A constant address. */
5267 gcc_assert (address
.offset
== addr
);
5268 if (GET_CODE (addr
) == CONST_INT
)
5270 /* (xxx).w or (xxx).l. */
5271 if (IN_RANGE (INTVAL (addr
), -0x8000, 0x7fff))
5272 fprintf (file
, MOTOROLA
? "%d.w" : "%d:w", (int) INTVAL (addr
));
5274 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (addr
));
5276 else if (TARGET_PCREL
)
5278 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
5280 output_addr_const (file
, addr
);
5281 asm_fprintf (file
, flag_pic
== 1 ? ":w,%Rpc)" : ":l,%Rpc)");
5285 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
5286 name ends in `.<letter>', as the last 2 characters can be
5287 mistaken as a size suffix. Put the name in parentheses. */
5288 if (GET_CODE (addr
) == SYMBOL_REF
5289 && strlen (XSTR (addr
, 0)) > 2
5290 && XSTR (addr
, 0)[strlen (XSTR (addr
, 0)) - 2] == '.')
5293 output_addr_const (file
, addr
);
5297 output_addr_const (file
, addr
);
5304 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
5305 label being accessed, otherwise it is -1. */
5306 labelno
= (address
.offset
5308 && GET_CODE (address
.offset
) == LABEL_REF
5309 ? CODE_LABEL_NUMBER (XEXP (address
.offset
, 0))
5313 /* Print the "offset(base" component. */
5315 asm_fprintf (file
, "%LL%d(%Rpc,", labelno
);
5319 output_addr_const (file
, address
.offset
);
5323 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
5325 /* Print the ",index" component, if any. */
5330 fprintf (file
, "%s.%c",
5331 M68K_REGNAME (REGNO (address
.index
)),
5332 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
5333 if (address
.scale
!= 1)
5334 fprintf (file
, "*%d", address
.scale
);
5338 else /* !MOTOROLA */
5340 if (!address
.offset
&& !address
.index
)
5341 fprintf (file
, "%s@", M68K_REGNAME (REGNO (address
.base
)));
5344 /* Print the "base@(offset" component. */
5346 asm_fprintf (file
, "%Rpc@(%LL%d", labelno
);
5350 fputs (M68K_REGNAME (REGNO (address
.base
)), file
);
5351 fprintf (file
, "@(");
5353 output_addr_const (file
, address
.offset
);
5355 /* Print the ",index" component, if any. */
5358 fprintf (file
, ",%s:%c",
5359 M68K_REGNAME (REGNO (address
.index
)),
5360 GET_MODE (address
.index
) == HImode
? 'w' : 'l');
5361 if (address
.scale
!= 1)
5362 fprintf (file
, ":%d", address
.scale
);
5370 /* Check for cases where a clr insns can be omitted from code using
5371 strict_low_part sets. For example, the second clrl here is not needed:
5372 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
5374 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
5375 insn we are checking for redundancy. TARGET is the register set by the
5379 strict_low_part_peephole_ok (machine_mode mode
, rtx_insn
*first_insn
,
5382 rtx_insn
*p
= first_insn
;
5384 while ((p
= PREV_INSN (p
)))
5386 if (NOTE_INSN_BASIC_BLOCK_P (p
))
5392 /* If it isn't an insn, then give up. */
5396 if (reg_set_p (target
, p
))
5398 rtx set
= single_set (p
);
5401 /* If it isn't an easy to recognize insn, then give up. */
5405 dest
= SET_DEST (set
);
5407 /* If this sets the entire target register to zero, then our
5408 first_insn is redundant. */
5409 if (rtx_equal_p (dest
, target
)
5410 && SET_SRC (set
) == const0_rtx
)
5412 else if (GET_CODE (dest
) == STRICT_LOW_PART
5413 && GET_CODE (XEXP (dest
, 0)) == REG
5414 && REGNO (XEXP (dest
, 0)) == REGNO (target
)
5415 && (GET_MODE_SIZE (GET_MODE (XEXP (dest
, 0)))
5416 <= GET_MODE_SIZE (mode
)))
5417 /* This is a strict low part set which modifies less than
5418 we are using, so it is safe. */
5428 /* Operand predicates for implementing asymmetric pc-relative addressing
5429 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
5430 when used as a source operand, but not as a destination operand.
5432 We model this by restricting the meaning of the basic predicates
5433 (general_operand, memory_operand, etc) to forbid the use of this
5434 addressing mode, and then define the following predicates that permit
5435 this addressing mode. These predicates can then be used for the
5436 source operands of the appropriate instructions.
5438 n.b. While it is theoretically possible to change all machine patterns
5439 to use this addressing more where permitted by the architecture,
5440 it has only been implemented for "common" cases: SImode, HImode, and
5441 QImode operands, and only for the principle operations that would
5442 require this addressing mode: data movement and simple integer operations.
5444 In parallel with these new predicates, two new constraint letters
5445 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
5446 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
5447 In the pcrel case 's' is only valid in combination with 'a' registers.
5448 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
5449 of how these constraints are used.
5451 The use of these predicates is strictly optional, though patterns that
5452 don't will cause an extra reload register to be allocated where one
5455 lea (abc:w,%pc),%a0 ; need to reload address
5456 moveq &1,%d1 ; since write to pc-relative space
5457 movel %d1,%a0@ ; is not allowed
5459 lea (abc:w,%pc),%a1 ; no need to reload address here
5460 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
5462 For more info, consult tiemann@cygnus.com.
5465 All of the ugliness with predicates and constraints is due to the
5466 simple fact that the m68k does not allow a pc-relative addressing
5467 mode as a destination. gcc does not distinguish between source and
5468 destination addresses. Hence, if we claim that pc-relative address
5469 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
5470 end up with invalid code. To get around this problem, we left
5471 pc-relative modes as invalid addresses, and then added special
5472 predicates and constraints to accept them.
5474 A cleaner way to handle this is to modify gcc to distinguish
5475 between source and destination addresses. We can then say that
5476 pc-relative is a valid source address but not a valid destination
5477 address, and hopefully avoid a lot of the predicate and constraint
5478 hackery. Unfortunately, this would be a pretty big change. It would
5479 be a useful change for a number of ports, but there aren't any current
5480 plans to undertake this.
5482 ***************************************************************************/
5486 output_andsi3 (rtx
*operands
)
5490 if (GET_CODE (operands
[2]) == CONST_INT
5491 && (INTVAL (operands
[2]) | 0xffff) == -1
5492 && (DATA_REG_P (operands
[0])
5493 || offsettable_memref_p (operands
[0]))
5494 && !TARGET_COLDFIRE
)
5496 if (GET_CODE (operands
[0]) != REG
)
5497 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5498 operands
[2] = GEN_INT (INTVAL (operands
[2]) & 0xffff);
5499 if (operands
[2] == const0_rtx
)
5501 return "and%.w %2,%0";
5503 if (GET_CODE (operands
[2]) == CONST_INT
5504 && (logval
= exact_log2 (~ INTVAL (operands
[2]) & 0xffffffff)) >= 0
5505 && (DATA_REG_P (operands
[0])
5506 || offsettable_memref_p (operands
[0])))
5508 if (DATA_REG_P (operands
[0]))
5509 operands
[1] = GEN_INT (logval
);
5512 operands
[0] = adjust_address (operands
[0], QImode
, 3 - (logval
/ 8));
5513 operands
[1] = GEN_INT (logval
% 8);
5515 return "bclr %1,%0";
5517 /* Only a standard logical operation on the whole word sets the
5518 condition codes in a way we can use. */
5519 if (!side_effects_p (operands
[0]))
5520 flags_operand1
= operands
[0];
5521 flags_valid
= FLAGS_VALID_YES
;
5522 return "and%.l %2,%0";
5526 output_iorsi3 (rtx
*operands
)
5530 if (GET_CODE (operands
[2]) == CONST_INT
5531 && INTVAL (operands
[2]) >> 16 == 0
5532 && (DATA_REG_P (operands
[0])
5533 || offsettable_memref_p (operands
[0]))
5534 && !TARGET_COLDFIRE
)
5536 if (GET_CODE (operands
[0]) != REG
)
5537 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5538 if (INTVAL (operands
[2]) == 0xffff)
5539 return "mov%.w %2,%0";
5540 return "or%.w %2,%0";
5542 if (GET_CODE (operands
[2]) == CONST_INT
5543 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
5544 && (DATA_REG_P (operands
[0])
5545 || offsettable_memref_p (operands
[0])))
5547 if (DATA_REG_P (operands
[0]))
5548 operands
[1] = GEN_INT (logval
);
5551 operands
[0] = adjust_address (operands
[0], QImode
, 3 - (logval
/ 8));
5552 operands
[1] = GEN_INT (logval
% 8);
5554 return "bset %1,%0";
5556 /* Only a standard logical operation on the whole word sets the
5557 condition codes in a way we can use. */
5558 if (!side_effects_p (operands
[0]))
5559 flags_operand1
= operands
[0];
5560 flags_valid
= FLAGS_VALID_YES
;
5561 return "or%.l %2,%0";
5565 output_xorsi3 (rtx
*operands
)
5569 if (GET_CODE (operands
[2]) == CONST_INT
5570 && INTVAL (operands
[2]) >> 16 == 0
5571 && (offsettable_memref_p (operands
[0]) || DATA_REG_P (operands
[0]))
5572 && !TARGET_COLDFIRE
)
5574 if (! DATA_REG_P (operands
[0]))
5575 operands
[0] = adjust_address (operands
[0], HImode
, 2);
5576 if (INTVAL (operands
[2]) == 0xffff)
5578 return "eor%.w %2,%0";
5580 if (GET_CODE (operands
[2]) == CONST_INT
5581 && (logval
= exact_log2 (INTVAL (operands
[2]) & 0xffffffff)) >= 0
5582 && (DATA_REG_P (operands
[0])
5583 || offsettable_memref_p (operands
[0])))
5585 if (DATA_REG_P (operands
[0]))
5586 operands
[1] = GEN_INT (logval
);
5589 operands
[0] = adjust_address (operands
[0], QImode
, 3 - (logval
/ 8));
5590 operands
[1] = GEN_INT (logval
% 8);
5592 return "bchg %1,%0";
5594 /* Only a standard logical operation on the whole word sets the
5595 condition codes in a way we can use. */
5596 if (!side_effects_p (operands
[0]))
5597 flags_operand1
= operands
[0];
5598 flags_valid
= FLAGS_VALID_YES
;
5599 return "eor%.l %2,%0";
5602 /* Return the instruction that should be used for a call to address X,
5603 which is known to be in operand 0. */
5608 if (symbolic_operand (x
, VOIDmode
))
5609 return m68k_symbolic_call
;
5614 /* Likewise sibling calls. */
5617 output_sibcall (rtx x
)
5619 if (symbolic_operand (x
, VOIDmode
))
5620 return m68k_symbolic_jump
;
5626 m68k_output_mi_thunk (FILE *file
, tree thunk ATTRIBUTE_UNUSED
,
5627 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
5630 const char *fnname
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk
));
5631 rtx this_slot
, offset
, addr
, mem
, tmp
;
5634 /* Avoid clobbering the struct value reg by using the
5635 static chain reg as a temporary. */
5636 tmp
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
5638 /* Pretend to be a post-reload pass while generating rtl. */
5639 reload_completed
= 1;
5641 /* The "this" pointer is stored at 4(%sp). */
5642 this_slot
= gen_rtx_MEM (Pmode
, plus_constant (Pmode
,
5643 stack_pointer_rtx
, 4));
5645 /* Add DELTA to THIS. */
5648 /* Make the offset a legitimate operand for memory addition. */
5649 offset
= GEN_INT (delta
);
5650 if ((delta
< -8 || delta
> 8)
5651 && (TARGET_COLDFIRE
|| USE_MOVQ (delta
)))
5653 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
), offset
);
5654 offset
= gen_rtx_REG (Pmode
, D0_REG
);
5656 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5657 copy_rtx (this_slot
), offset
));
5660 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5661 if (vcall_offset
!= 0)
5663 /* Set the static chain register to *THIS. */
5664 emit_move_insn (tmp
, this_slot
);
5665 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
5667 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5668 addr
= plus_constant (Pmode
, tmp
, vcall_offset
);
5669 if (!m68k_legitimate_address_p (Pmode
, addr
, true))
5671 emit_insn (gen_rtx_SET (tmp
, addr
));
5675 /* Load the offset into %d0 and add it to THIS. */
5676 emit_move_insn (gen_rtx_REG (Pmode
, D0_REG
),
5677 gen_rtx_MEM (Pmode
, addr
));
5678 emit_insn (gen_add3_insn (copy_rtx (this_slot
),
5679 copy_rtx (this_slot
),
5680 gen_rtx_REG (Pmode
, D0_REG
)));
5683 /* Jump to the target function. Use a sibcall if direct jumps are
5684 allowed, otherwise load the address into a register first. */
5685 mem
= DECL_RTL (function
);
5686 if (!sibcall_operand (XEXP (mem
, 0), VOIDmode
))
5688 gcc_assert (flag_pic
);
5690 if (!TARGET_SEP_DATA
)
5692 /* Use the static chain register as a temporary (call-clobbered)
5693 GOT pointer for this function. We can use the static chain
5694 register because it isn't live on entry to the thunk. */
5695 SET_REGNO (pic_offset_table_rtx
, STATIC_CHAIN_REGNUM
);
5696 emit_insn (gen_load_got (pic_offset_table_rtx
));
5698 legitimize_pic_address (XEXP (mem
, 0), Pmode
, tmp
);
5699 mem
= replace_equiv_address (mem
, tmp
);
5701 insn
= emit_call_insn (gen_sibcall (mem
, const0_rtx
));
5702 SIBLING_CALL_P (insn
) = 1;
5704 /* Run just enough of rest_of_compilation. */
5705 insn
= get_insns ();
5706 split_all_insns_noflow ();
5707 assemble_start_function (thunk
, fnname
);
5708 final_start_function (insn
, file
, 1);
5709 final (insn
, file
, 1);
5710 final_end_function ();
5711 assemble_end_function (thunk
, fnname
);
5713 /* Clean up the vars set above. */
5714 reload_completed
= 0;
5716 /* Restore the original PIC register. */
5718 SET_REGNO (pic_offset_table_rtx
, PIC_REG
);
5721 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5724 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED
,
5725 int incoming ATTRIBUTE_UNUSED
)
5727 return gen_rtx_REG (Pmode
, M68K_STRUCT_VALUE_REGNUM
);
5730 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5732 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED
,
5733 unsigned int new_reg
)
5736 /* Interrupt functions can only use registers that have already been
5737 saved by the prologue, even if they would normally be
5740 if ((m68k_get_function_kind (current_function_decl
)
5741 == m68k_fk_interrupt_handler
)
5742 && !df_regs_ever_live_p (new_reg
))
5748 /* Implement TARGET_HARD_REGNO_NREGS.
5750 On the m68k, ordinary registers hold 32 bits worth;
5751 for the 68881 registers, a single register is always enough for
5752 anything that can be stored in them at all. */
5755 m68k_hard_regno_nregs (unsigned int regno
, machine_mode mode
)
5758 return GET_MODE_NUNITS (mode
);
5759 return CEIL (GET_MODE_SIZE (mode
), UNITS_PER_WORD
);
5762 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5763 registers can hold any mode, but restrict the 68881 registers to
5764 floating-point modes. */
5767 m68k_hard_regno_mode_ok (unsigned int regno
, machine_mode mode
)
5769 if (DATA_REGNO_P (regno
))
5771 /* Data Registers, can hold aggregate if fits in. */
5772 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 8)
5775 else if (ADDRESS_REGNO_P (regno
))
5777 if (regno
+ GET_MODE_SIZE (mode
) / 4 <= 16)
5780 else if (FP_REGNO_P (regno
))
5782 /* FPU registers, hold float or complex float of long double or
5784 if ((GET_MODE_CLASS (mode
) == MODE_FLOAT
5785 || GET_MODE_CLASS (mode
) == MODE_COMPLEX_FLOAT
)
5786 && GET_MODE_UNIT_SIZE (mode
) <= TARGET_FP_REG_SIZE
)
5792 /* Implement TARGET_MODES_TIEABLE_P. */
5795 m68k_modes_tieable_p (machine_mode mode1
, machine_mode mode2
)
5797 return (!TARGET_HARD_FLOAT
5798 || ((GET_MODE_CLASS (mode1
) == MODE_FLOAT
5799 || GET_MODE_CLASS (mode1
) == MODE_COMPLEX_FLOAT
)
5800 == (GET_MODE_CLASS (mode2
) == MODE_FLOAT
5801 || GET_MODE_CLASS (mode2
) == MODE_COMPLEX_FLOAT
)));
5804 /* Implement SECONDARY_RELOAD_CLASS. */
5807 m68k_secondary_reload_class (enum reg_class rclass
,
5808 machine_mode mode
, rtx x
)
5812 regno
= true_regnum (x
);
5814 /* If one operand of a movqi is an address register, the other
5815 operand must be a general register or constant. Other types
5816 of operand must be reloaded through a data register. */
5817 if (GET_MODE_SIZE (mode
) == 1
5818 && reg_classes_intersect_p (rclass
, ADDR_REGS
)
5819 && !(INT_REGNO_P (regno
) || CONSTANT_P (x
)))
5822 /* PC-relative addresses must be loaded into an address register first. */
5824 && !reg_class_subset_p (rclass
, ADDR_REGS
)
5825 && symbolic_operand (x
, VOIDmode
))
5831 /* Implement PREFERRED_RELOAD_CLASS. */
5834 m68k_preferred_reload_class (rtx x
, enum reg_class rclass
)
5836 enum reg_class secondary_class
;
5838 /* If RCLASS might need a secondary reload, try restricting it to
5839 a class that doesn't. */
5840 secondary_class
= m68k_secondary_reload_class (rclass
, GET_MODE (x
), x
);
5841 if (secondary_class
!= NO_REGS
5842 && reg_class_subset_p (secondary_class
, rclass
))
5843 return secondary_class
;
5845 /* Prefer to use moveq for in-range constants. */
5846 if (GET_CODE (x
) == CONST_INT
5847 && reg_class_subset_p (DATA_REGS
, rclass
)
5848 && IN_RANGE (INTVAL (x
), -0x80, 0x7f))
5851 /* ??? Do we really need this now? */
5852 if (GET_CODE (x
) == CONST_DOUBLE
5853 && GET_MODE_CLASS (GET_MODE (x
)) == MODE_FLOAT
)
5855 if (TARGET_HARD_FLOAT
&& reg_class_subset_p (FP_REGS
, rclass
))
5864 /* Return floating point values in a 68881 register. This makes 68881 code
5865 a little bit faster. It also makes -msoft-float code incompatible with
5866 hard-float code, so people have to be careful not to mix the two.
5867 For ColdFire it was decided the ABI incompatibility is undesirable.
5868 If there is need for a hard-float ABI it is probably worth doing it
5869 properly and also passing function arguments in FP registers. */
5871 m68k_libcall_value (machine_mode mode
)
5878 return gen_rtx_REG (mode
, FP0_REG
);
5884 return gen_rtx_REG (mode
, m68k_libcall_value_in_a0_p
? A0_REG
: D0_REG
);
5887 /* Location in which function value is returned.
5888 NOTE: Due to differences in ABIs, don't call this function directly,
5889 use FUNCTION_VALUE instead. */
5891 m68k_function_value (const_tree valtype
, const_tree func ATTRIBUTE_UNUSED
)
5895 mode
= TYPE_MODE (valtype
);
5901 return gen_rtx_REG (mode
, FP0_REG
);
5907 /* If the function returns a pointer, push that into %a0. */
5908 if (func
&& POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func
))))
5909 /* For compatibility with the large body of existing code which
5910 does not always properly declare external functions returning
5911 pointer types, the m68k/SVR4 convention is to copy the value
5912 returned for pointer functions from a0 to d0 in the function
5913 epilogue, so that callers that have neglected to properly
5914 declare the callee can still find the correct return value in
5916 return gen_rtx_PARALLEL
5919 gen_rtx_EXPR_LIST (VOIDmode
,
5920 gen_rtx_REG (mode
, A0_REG
),
5922 gen_rtx_EXPR_LIST (VOIDmode
,
5923 gen_rtx_REG (mode
, D0_REG
),
5925 else if (POINTER_TYPE_P (valtype
))
5926 return gen_rtx_REG (mode
, A0_REG
);
5928 return gen_rtx_REG (mode
, D0_REG
);
5931 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5932 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5934 m68k_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
5936 machine_mode mode
= TYPE_MODE (type
);
5938 if (mode
== BLKmode
)
5941 /* If TYPE's known alignment is less than the alignment of MODE that
5942 would contain the structure, then return in memory. We need to
5943 do so to maintain the compatibility between code compiled with
5944 -mstrict-align and that compiled with -mno-strict-align. */
5945 if (AGGREGATE_TYPE_P (type
)
5946 && TYPE_ALIGN (type
) < GET_MODE_ALIGNMENT (mode
))
5953 /* CPU to schedule the program for. */
5954 enum attr_cpu m68k_sched_cpu
;
5956 /* MAC to schedule the program for. */
5957 enum attr_mac m68k_sched_mac
;
5965 /* Integer register. */
5971 /* Implicit mem reference (e.g. stack). */
5974 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5977 /* Memory with offset but without indexing. EA mode 5. */
5980 /* Memory with indexing. EA mode 6. */
5983 /* Memory referenced by absolute address. EA mode 7. */
5986 /* Immediate operand that doesn't require extension word. */
5989 /* Immediate 16 bit operand. */
5992 /* Immediate 32 bit operand. */
5996 /* Return type of memory ADDR_RTX refers to. */
5997 static enum attr_op_type
5998 sched_address_type (machine_mode mode
, rtx addr_rtx
)
6000 struct m68k_address address
;
6002 if (symbolic_operand (addr_rtx
, VOIDmode
))
6003 return OP_TYPE_MEM7
;
6005 if (!m68k_decompose_address (mode
, addr_rtx
,
6006 reload_completed
, &address
))
6008 gcc_assert (!reload_completed
);
6009 /* Reload will likely fix the address to be in the register. */
6010 return OP_TYPE_MEM234
;
6013 if (address
.scale
!= 0)
6014 return OP_TYPE_MEM6
;
6016 if (address
.base
!= NULL_RTX
)
6018 if (address
.offset
== NULL_RTX
)
6019 return OP_TYPE_MEM234
;
6021 return OP_TYPE_MEM5
;
6024 gcc_assert (address
.offset
!= NULL_RTX
);
6026 return OP_TYPE_MEM7
;
6029 /* Return X or Y (depending on OPX_P) operand of INSN. */
6031 sched_get_operand (rtx_insn
*insn
, bool opx_p
)
6035 if (recog_memoized (insn
) < 0)
6038 extract_constrain_insn_cached (insn
);
6041 i
= get_attr_opx (insn
);
6043 i
= get_attr_opy (insn
);
6045 if (i
>= recog_data
.n_operands
)
6048 return recog_data
.operand
[i
];
6051 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
6052 If ADDRESS_P is true, return type of memory location operand refers to. */
6053 static enum attr_op_type
6054 sched_attr_op_type (rtx_insn
*insn
, bool opx_p
, bool address_p
)
6058 op
= sched_get_operand (insn
, opx_p
);
6062 gcc_assert (!reload_completed
);
6067 return sched_address_type (QImode
, op
);
6069 if (memory_operand (op
, VOIDmode
))
6070 return sched_address_type (GET_MODE (op
), XEXP (op
, 0));
6072 if (register_operand (op
, VOIDmode
))
6074 if ((!reload_completed
&& FLOAT_MODE_P (GET_MODE (op
)))
6075 || (reload_completed
&& FP_REG_P (op
)))
6081 if (GET_CODE (op
) == CONST_INT
)
6087 /* Check for quick constants. */
6088 switch (get_attr_type (insn
))
6091 if (IN_RANGE (ival
, 1, 8) || IN_RANGE (ival
, -8, -1))
6092 return OP_TYPE_IMM_Q
;
6094 gcc_assert (!reload_completed
);
6098 if (USE_MOVQ (ival
))
6099 return OP_TYPE_IMM_Q
;
6101 gcc_assert (!reload_completed
);
6105 if (valid_mov3q_const (ival
))
6106 return OP_TYPE_IMM_Q
;
6108 gcc_assert (!reload_completed
);
6115 if (IN_RANGE (ival
, -0x8000, 0x7fff))
6116 return OP_TYPE_IMM_W
;
6118 return OP_TYPE_IMM_L
;
6121 if (GET_CODE (op
) == CONST_DOUBLE
)
6123 switch (GET_MODE (op
))
6126 return OP_TYPE_IMM_W
;
6130 return OP_TYPE_IMM_L
;
6137 if (GET_CODE (op
) == CONST
6138 || symbolic_operand (op
, VOIDmode
)
6141 switch (GET_MODE (op
))
6144 return OP_TYPE_IMM_Q
;
6147 return OP_TYPE_IMM_W
;
6150 return OP_TYPE_IMM_L
;
6153 if (symbolic_operand (m68k_unwrap_symbol (op
, false), VOIDmode
))
6155 return OP_TYPE_IMM_W
;
6157 return OP_TYPE_IMM_L
;
6161 gcc_assert (!reload_completed
);
6163 if (FLOAT_MODE_P (GET_MODE (op
)))
6169 /* Implement opx_type attribute.
6170 Return type of INSN's operand X.
6171 If ADDRESS_P is true, return type of memory location operand refers to. */
6173 m68k_sched_attr_opx_type (rtx_insn
*insn
, int address_p
)
6175 switch (sched_attr_op_type (insn
, true, address_p
!= 0))
6181 return OPX_TYPE_FPN
;
6184 return OPX_TYPE_MEM1
;
6186 case OP_TYPE_MEM234
:
6187 return OPX_TYPE_MEM234
;
6190 return OPX_TYPE_MEM5
;
6193 return OPX_TYPE_MEM6
;
6196 return OPX_TYPE_MEM7
;
6199 return OPX_TYPE_IMM_Q
;
6202 return OPX_TYPE_IMM_W
;
6205 return OPX_TYPE_IMM_L
;
6212 /* Implement opy_type attribute.
6213 Return type of INSN's operand Y.
6214 If ADDRESS_P is true, return type of memory location operand refers to. */
6216 m68k_sched_attr_opy_type (rtx_insn
*insn
, int address_p
)
6218 switch (sched_attr_op_type (insn
, false, address_p
!= 0))
6224 return OPY_TYPE_FPN
;
6227 return OPY_TYPE_MEM1
;
6229 case OP_TYPE_MEM234
:
6230 return OPY_TYPE_MEM234
;
6233 return OPY_TYPE_MEM5
;
6236 return OPY_TYPE_MEM6
;
6239 return OPY_TYPE_MEM7
;
6242 return OPY_TYPE_IMM_Q
;
6245 return OPY_TYPE_IMM_W
;
6248 return OPY_TYPE_IMM_L
;
6255 /* Return size of INSN as int. */
6257 sched_get_attr_size_int (rtx_insn
*insn
)
6261 switch (get_attr_type (insn
))
6264 /* There should be no references to m68k_sched_attr_size for 'ignore'
6278 switch (get_attr_opx_type (insn
))
6284 case OPX_TYPE_MEM234
:
6285 case OPY_TYPE_IMM_Q
:
6290 /* Here we assume that most absolute references are short. */
6292 case OPY_TYPE_IMM_W
:
6296 case OPY_TYPE_IMM_L
:
6304 switch (get_attr_opy_type (insn
))
6310 case OPY_TYPE_MEM234
:
6311 case OPY_TYPE_IMM_Q
:
6316 /* Here we assume that most absolute references are short. */
6318 case OPY_TYPE_IMM_W
:
6322 case OPY_TYPE_IMM_L
:
6332 gcc_assert (!reload_completed
);
6340 /* Return size of INSN as attribute enum value. */
6342 m68k_sched_attr_size (rtx_insn
*insn
)
6344 switch (sched_get_attr_size_int (insn
))
6360 /* Return operand X or Y (depending on OPX_P) of INSN,
6361 if it is a MEM, or NULL overwise. */
6362 static enum attr_op_type
6363 sched_get_opxy_mem_type (rtx_insn
*insn
, bool opx_p
)
6367 switch (get_attr_opx_type (insn
))
6372 case OPX_TYPE_IMM_Q
:
6373 case OPX_TYPE_IMM_W
:
6374 case OPX_TYPE_IMM_L
:
6378 case OPX_TYPE_MEM234
:
6381 return OP_TYPE_MEM1
;
6384 return OP_TYPE_MEM6
;
6392 switch (get_attr_opy_type (insn
))
6397 case OPY_TYPE_IMM_Q
:
6398 case OPY_TYPE_IMM_W
:
6399 case OPY_TYPE_IMM_L
:
6403 case OPY_TYPE_MEM234
:
6406 return OP_TYPE_MEM1
;
6409 return OP_TYPE_MEM6
;
6417 /* Implement op_mem attribute. */
6419 m68k_sched_attr_op_mem (rtx_insn
*insn
)
6421 enum attr_op_type opx
;
6422 enum attr_op_type opy
;
6424 opx
= sched_get_opxy_mem_type (insn
, true);
6425 opy
= sched_get_opxy_mem_type (insn
, false);
6427 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_RN
)
6430 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM1
)
6432 switch (get_attr_opx_access (insn
))
6448 if (opy
== OP_TYPE_RN
&& opx
== OP_TYPE_MEM6
)
6450 switch (get_attr_opx_access (insn
))
6466 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_RN
)
6469 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM1
)
6471 switch (get_attr_opx_access (insn
))
6477 gcc_assert (!reload_completed
);
6482 if (opy
== OP_TYPE_MEM1
&& opx
== OP_TYPE_MEM6
)
6484 switch (get_attr_opx_access (insn
))
6490 gcc_assert (!reload_completed
);
6495 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_RN
)
6498 if (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM1
)
6500 switch (get_attr_opx_access (insn
))
6506 gcc_assert (!reload_completed
);
6511 gcc_assert (opy
== OP_TYPE_MEM6
&& opx
== OP_TYPE_MEM6
);
6512 gcc_assert (!reload_completed
);
6516 /* Data for ColdFire V4 index bypass.
6517 Producer modifies register that is used as index in consumer with
6521 /* Producer instruction. */
6524 /* Consumer instruction. */
6527 /* Scale of indexed memory access within consumer.
6528 Or zero if bypass should not be effective at the moment. */
6530 } sched_cfv4_bypass_data
;
6532 /* An empty state that is used in m68k_sched_adjust_cost. */
6533 static state_t sched_adjust_cost_state
;
6535 /* Implement adjust_cost scheduler hook.
6536 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
6538 m68k_sched_adjust_cost (rtx_insn
*insn
, int, rtx_insn
*def_insn
, int cost
,
6543 if (recog_memoized (def_insn
) < 0
6544 || recog_memoized (insn
) < 0)
6547 if (sched_cfv4_bypass_data
.scale
== 1)
6548 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
6550 /* haifa-sched.cc: insn_cost () calls bypass_p () just before
6551 targetm.sched.adjust_cost (). Hence, we can be relatively sure
6552 that the data in sched_cfv4_bypass_data is up to date. */
6553 gcc_assert (sched_cfv4_bypass_data
.pro
== def_insn
6554 && sched_cfv4_bypass_data
.con
== insn
);
6559 sched_cfv4_bypass_data
.pro
= NULL
;
6560 sched_cfv4_bypass_data
.con
= NULL
;
6561 sched_cfv4_bypass_data
.scale
= 0;
6564 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
6565 && sched_cfv4_bypass_data
.con
== NULL
6566 && sched_cfv4_bypass_data
.scale
== 0);
6568 /* Don't try to issue INSN earlier than DFA permits.
6569 This is especially useful for instructions that write to memory,
6570 as their true dependence (default) latency is better to be set to 0
6571 to workaround alias analysis limitations.
6572 This is, in fact, a machine independent tweak, so, probably,
6573 it should be moved to haifa-sched.cc: insn_cost (). */
6574 delay
= min_insn_conflict_delay (sched_adjust_cost_state
, def_insn
, insn
);
6581 /* Return maximal number of insns that can be scheduled on a single cycle. */
6583 m68k_sched_issue_rate (void)
6585 switch (m68k_sched_cpu
)
6601 /* Maximal length of instruction for current CPU.
6602 E.g. it is 3 for any ColdFire core. */
6603 static int max_insn_size
;
6605 /* Data to model instruction buffer of CPU. */
6608 /* True if instruction buffer model is modeled for current CPU. */
6611 /* Size of the instruction buffer in words. */
6614 /* Number of filled words in the instruction buffer. */
6617 /* Additional information about instruction buffer for CPUs that have
6618 a buffer of instruction records, rather then a plain buffer
6619 of instruction words. */
6620 struct _sched_ib_records
6622 /* Size of buffer in records. */
6625 /* Array to hold data on adjustments made to the size of the buffer. */
6628 /* Index of the above array. */
6632 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6636 static struct _sched_ib sched_ib
;
6638 /* ID of memory unit. */
6639 static int sched_mem_unit_code
;
6641 /* Implementation of the targetm.sched.variable_issue () hook.
6642 It is called after INSN was issued. It returns the number of insns
6643 that can possibly get scheduled on the current cycle.
6644 It is used here to determine the effect of INSN on the instruction
6647 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED
,
6648 int sched_verbose ATTRIBUTE_UNUSED
,
6649 rtx_insn
*insn
, int can_issue_more
)
6653 if (recog_memoized (insn
) >= 0 && get_attr_type (insn
) != TYPE_IGNORE
)
6655 switch (m68k_sched_cpu
)
6659 insn_size
= sched_get_attr_size_int (insn
);
6663 insn_size
= sched_get_attr_size_int (insn
);
6665 /* ColdFire V3 and V4 cores have instruction buffers that can
6666 accumulate up to 8 instructions regardless of instructions'
6667 sizes. So we should take care not to "prefetch" 24 one-word
6668 or 12 two-words instructions.
6669 To model this behavior we temporarily decrease size of the
6670 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6674 adjust
= max_insn_size
- insn_size
;
6675 sched_ib
.size
-= adjust
;
6677 if (sched_ib
.filled
> sched_ib
.size
)
6678 sched_ib
.filled
= sched_ib
.size
;
6680 sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
] = adjust
;
6683 ++sched_ib
.records
.adjust_index
;
6684 if (sched_ib
.records
.adjust_index
== sched_ib
.records
.n_insns
)
6685 sched_ib
.records
.adjust_index
= 0;
6687 /* Undo adjustment we did 7 instructions ago. */
6689 += sched_ib
.records
.adjust
[sched_ib
.records
.adjust_index
];
6694 gcc_assert (!sched_ib
.enabled_p
);
6702 if (insn_size
> sched_ib
.filled
)
6703 /* Scheduling for register pressure does not always take DFA into
6704 account. Workaround instruction buffer not being filled enough. */
6706 gcc_assert (sched_pressure
== SCHED_PRESSURE_WEIGHTED
);
6707 insn_size
= sched_ib
.filled
;
6712 else if (GET_CODE (PATTERN (insn
)) == ASM_INPUT
6713 || asm_noperands (PATTERN (insn
)) >= 0)
6714 insn_size
= sched_ib
.filled
;
6718 sched_ib
.filled
-= insn_size
;
6720 return can_issue_more
;
6723 /* Return how many instructions should scheduler lookahead to choose the
6726 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6728 return m68k_sched_issue_rate () - 1;
6731 /* Implementation of targetm.sched.init_global () hook.
6732 It is invoked once per scheduling pass and is used here
6733 to initialize scheduler constants. */
6735 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED
,
6736 int sched_verbose ATTRIBUTE_UNUSED
,
6737 int n_insns ATTRIBUTE_UNUSED
)
6739 /* Check that all instructions have DFA reservations and
6740 that all instructions can be issued from a clean state. */
6746 state
= alloca (state_size ());
6748 for (insn
= get_insns (); insn
!= NULL
; insn
= NEXT_INSN (insn
))
6750 if (INSN_P (insn
) && recog_memoized (insn
) >= 0)
6752 gcc_assert (insn_has_dfa_reservation_p (insn
));
6754 state_reset (state
);
6755 if (state_transition (state
, insn
) >= 0)
6761 /* Setup target cpu. */
6763 /* ColdFire V4 has a set of features to keep its instruction buffer full
6764 (e.g., a separate memory bus for instructions) and, hence, we do not model
6765 buffer for this CPU. */
6766 sched_ib
.enabled_p
= (m68k_sched_cpu
!= CPU_CFV4
);
6768 switch (m68k_sched_cpu
)
6771 sched_ib
.filled
= 0;
6778 sched_ib
.records
.n_insns
= 0;
6779 sched_ib
.records
.adjust
= NULL
;
6784 sched_ib
.records
.n_insns
= 8;
6785 sched_ib
.records
.adjust
= XNEWVEC (int, sched_ib
.records
.n_insns
);
6792 sched_mem_unit_code
= get_cpu_unit_code ("cf_mem1");
6794 sched_adjust_cost_state
= xmalloc (state_size ());
6795 state_reset (sched_adjust_cost_state
);
6798 emit_insn (gen_ib ());
6799 sched_ib
.insn
= get_insns ();
6803 /* Scheduling pass is now finished. Free/reset static variables. */
6805 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED
,
6806 int verbose ATTRIBUTE_UNUSED
)
6808 sched_ib
.insn
= NULL
;
6810 free (sched_adjust_cost_state
);
6811 sched_adjust_cost_state
= NULL
;
6813 sched_mem_unit_code
= 0;
6815 free (sched_ib
.records
.adjust
);
6816 sched_ib
.records
.adjust
= NULL
;
6817 sched_ib
.records
.n_insns
= 0;
6821 /* Implementation of targetm.sched.init () hook.
6822 It is invoked each time scheduler starts on the new block (basic block or
6823 extended basic block). */
6825 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED
,
6826 int sched_verbose ATTRIBUTE_UNUSED
,
6827 int n_insns ATTRIBUTE_UNUSED
)
6829 switch (m68k_sched_cpu
)
6837 sched_ib
.size
= sched_ib
.records
.n_insns
* max_insn_size
;
6839 memset (sched_ib
.records
.adjust
, 0,
6840 sched_ib
.records
.n_insns
* sizeof (*sched_ib
.records
.adjust
));
6841 sched_ib
.records
.adjust_index
= 0;
6845 gcc_assert (!sched_ib
.enabled_p
);
6853 if (sched_ib
.enabled_p
)
6854 /* haifa-sched.cc: schedule_block () calls advance_cycle () just before
6855 the first cycle. Workaround that. */
6856 sched_ib
.filled
= -2;
6859 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6860 It is invoked just before current cycle finishes and is used here
6861 to track if instruction buffer got its two words this cycle. */
6863 m68k_sched_dfa_pre_advance_cycle (void)
6865 if (!sched_ib
.enabled_p
)
6868 if (!cpu_unit_reservation_p (curr_state
, sched_mem_unit_code
))
6870 sched_ib
.filled
+= 2;
6872 if (sched_ib
.filled
> sched_ib
.size
)
6873 sched_ib
.filled
= sched_ib
.size
;
6877 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6878 It is invoked just after new cycle begins and is used here
6879 to setup number of filled words in the instruction buffer so that
6880 instructions which won't have all their words prefetched would be
6881 stalled for a cycle. */
6883 m68k_sched_dfa_post_advance_cycle (void)
6887 if (!sched_ib
.enabled_p
)
6890 /* Setup number of prefetched instruction words in the instruction
6892 i
= max_insn_size
- sched_ib
.filled
;
6896 if (state_transition (curr_state
, sched_ib
.insn
) >= 0)
6897 /* Pick up scheduler state. */
6902 /* Return X or Y (depending on OPX_P) operand of INSN,
6903 if it is an integer register, or NULL overwise. */
6905 sched_get_reg_operand (rtx_insn
*insn
, bool opx_p
)
6911 if (get_attr_opx_type (insn
) == OPX_TYPE_RN
)
6913 op
= sched_get_operand (insn
, true);
6914 gcc_assert (op
!= NULL
);
6916 if (!reload_completed
&& !REG_P (op
))
6922 if (get_attr_opy_type (insn
) == OPY_TYPE_RN
)
6924 op
= sched_get_operand (insn
, false);
6925 gcc_assert (op
!= NULL
);
6927 if (!reload_completed
&& !REG_P (op
))
6935 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6938 sched_mem_operand_p (rtx_insn
*insn
, bool opx_p
)
6940 switch (sched_get_opxy_mem_type (insn
, opx_p
))
6951 /* Return X or Y (depending on OPX_P) operand of INSN,
6952 if it is a MEM, or NULL overwise. */
6954 sched_get_mem_operand (rtx_insn
*insn
, bool must_read_p
, bool must_write_p
)
6974 if (opy_p
&& sched_mem_operand_p (insn
, false))
6975 return sched_get_operand (insn
, false);
6977 if (opx_p
&& sched_mem_operand_p (insn
, true))
6978 return sched_get_operand (insn
, true);
6984 /* Return non-zero if PRO modifies register used as part of
6987 m68k_sched_address_bypass_p (rtx_insn
*pro
, rtx_insn
*con
)
6992 pro_x
= sched_get_reg_operand (pro
, true);
6996 con_mem_read
= sched_get_mem_operand (con
, true, false);
6997 gcc_assert (con_mem_read
!= NULL
);
6999 if (reg_mentioned_p (pro_x
, con_mem_read
))
7005 /* Helper function for m68k_sched_indexed_address_bypass_p.
7006 if PRO modifies register used as index in CON,
7007 return scale of indexed memory access in CON. Return zero overwise. */
7009 sched_get_indexed_address_scale (rtx_insn
*pro
, rtx_insn
*con
)
7013 struct m68k_address address
;
7015 reg
= sched_get_reg_operand (pro
, true);
7019 mem
= sched_get_mem_operand (con
, true, false);
7020 gcc_assert (mem
!= NULL
&& MEM_P (mem
));
7022 if (!m68k_decompose_address (GET_MODE (mem
), XEXP (mem
, 0), reload_completed
,
7026 if (REGNO (reg
) == REGNO (address
.index
))
7028 gcc_assert (address
.scale
!= 0);
7029 return address
.scale
;
7035 /* Return non-zero if PRO modifies register used
7036 as index with scale 2 or 4 in CON. */
7038 m68k_sched_indexed_address_bypass_p (rtx_insn
*pro
, rtx_insn
*con
)
7040 gcc_assert (sched_cfv4_bypass_data
.pro
== NULL
7041 && sched_cfv4_bypass_data
.con
== NULL
7042 && sched_cfv4_bypass_data
.scale
== 0);
7044 switch (sched_get_indexed_address_scale (pro
, con
))
7047 /* We can't have a variable latency bypass, so
7048 remember to adjust the insn cost in adjust_cost hook. */
7049 sched_cfv4_bypass_data
.pro
= pro
;
7050 sched_cfv4_bypass_data
.con
= con
;
7051 sched_cfv4_bypass_data
.scale
= 1;
7063 /* We generate a two-instructions program at M_TRAMP :
7064 movea.l &CHAIN_VALUE,%a0
7066 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
7069 m68k_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
7071 rtx fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
7074 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM
));
7076 mem
= adjust_address (m_tramp
, HImode
, 0);
7077 emit_move_insn (mem
, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM
-8) << 9)));
7078 mem
= adjust_address (m_tramp
, SImode
, 2);
7079 emit_move_insn (mem
, chain_value
);
7081 mem
= adjust_address (m_tramp
, HImode
, 6);
7082 emit_move_insn (mem
, GEN_INT(0x4EF9));
7083 mem
= adjust_address (m_tramp
, SImode
, 8);
7084 emit_move_insn (mem
, fnaddr
);
7086 FINALIZE_TRAMPOLINE (XEXP (m_tramp
, 0));
7089 /* On the 68000, the RTS insn cannot pop anything.
7090 On the 68010, the RTD insn may be used to pop them if the number
7091 of args is fixed, but if the number is variable then the caller
7092 must pop them all. RTD can't be used for library calls now
7093 because the library is compiled with the Unix compiler.
7094 Use of RTD is a selectable option, since it is incompatible with
7095 standard Unix calling sequences. If the option is not selected,
7096 the caller must always pop the args. */
7099 m68k_return_pops_args (tree fundecl
, tree funtype
, poly_int64 size
)
7103 || TREE_CODE (fundecl
) != IDENTIFIER_NODE
)
7104 && (!stdarg_p (funtype
)))
7105 ? (HOST_WIDE_INT
) size
: 0);
7108 /* Make sure everything's fine if we *don't* have a given processor.
7109 This assumes that putting a register in fixed_regs will keep the
7110 compiler's mitts completely off it. We don't bother to zero it out
7111 of register classes. */
7114 m68k_conditional_register_usage (void)
7118 if (!TARGET_HARD_FLOAT
)
7120 x
= reg_class_contents
[FP_REGS
];
7121 for (i
= 0; i
< FIRST_PSEUDO_REGISTER
; i
++)
7122 if (TEST_HARD_REG_BIT (x
, i
))
7123 fixed_regs
[i
] = call_used_regs
[i
] = 1;
7126 fixed_regs
[PIC_REG
] = call_used_regs
[PIC_REG
] = 1;
7130 m68k_init_sync_libfuncs (void)
7132 init_sync_libfuncs (UNITS_PER_WORD
);
7135 /* Implements EPILOGUE_USES. All registers are live on exit from an
7136 interrupt routine. */
7138 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED
)
7140 return (reload_completed
7141 && (m68k_get_function_kind (current_function_decl
)
7142 == m68k_fk_interrupt_handler
));
7146 /* Implement TARGET_C_EXCESS_PRECISION.
7148 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
7149 instructions, we get proper intermediate rounding, otherwise we
7150 get extended precision results. */
7152 static enum flt_eval_method
7153 m68k_excess_precision (enum excess_precision_type type
)
7157 case EXCESS_PRECISION_TYPE_FAST
:
7158 /* The fastest type to promote to will always be the native type,
7159 whether that occurs with implicit excess precision or
7161 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
7162 case EXCESS_PRECISION_TYPE_STANDARD
:
7163 case EXCESS_PRECISION_TYPE_IMPLICIT
:
7164 /* Otherwise, the excess precision we want when we are
7165 in a standards compliant mode, and the implicit precision we
7166 provide can be identical. */
7167 if (TARGET_68040
|| ! TARGET_68881
)
7168 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT
;
7170 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE
;
7171 case EXCESS_PRECISION_TYPE_FLOAT16
:
7172 error ("%<-fexcess-precision=16%> is not supported on this target");
7177 return FLT_EVAL_METHOD_UNPREDICTABLE
;
7180 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
7181 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
7184 m68k_push_rounding (poly_int64 bytes
)
7186 if (TARGET_COLDFIRE
)
7188 return (bytes
+ 1) & ~1;
7191 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
7194 m68k_promote_function_mode (const_tree type
, machine_mode mode
,
7195 int *punsignedp ATTRIBUTE_UNUSED
,
7196 const_tree fntype ATTRIBUTE_UNUSED
,
7199 /* Promote libcall arguments narrower than int to match the normal C
7200 ABI (for which promotions are handled via
7201 TARGET_PROMOTE_PROTOTYPES). */
7202 if (type
== NULL_TREE
&& !for_return
&& (mode
== QImode
|| mode
== HImode
))
7207 /* Implement TARGET_ZERO_CALL_USED_REGS. */
7210 m68k_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs
)
7212 rtx zero_fpreg
= NULL_RTX
;
7214 for (unsigned int regno
= 0; regno
< FIRST_PSEUDO_REGISTER
; regno
++)
7215 if (TEST_HARD_REG_BIT (need_zeroed_hardregs
, regno
))
7219 if (INT_REGNO_P (regno
))
7221 reg
= regno_reg_rtx
[regno
];
7222 zero
= CONST0_RTX (SImode
);
7224 else if (FP_REGNO_P (regno
))
7226 reg
= gen_raw_REG (SFmode
, regno
);
7227 if (zero_fpreg
== NULL_RTX
)
7229 /* On the 040/060 clearing an FP reg loads a large
7230 immediate. To reduce code size use the first
7231 cleared FP reg to clear remaining ones. Don't do
7232 this on cores which use fmovecr. */
7233 zero
= CONST0_RTX (SFmode
);
7243 emit_move_insn (reg
, zero
);
7246 return need_zeroed_hardregs
;
7249 /* Implement TARGET_C_MODE_FOR_FLOATING_TYPE. Return XFmode or DFmode
7250 for TI_LONG_DOUBLE_TYPE which is for long double type, go with the
7251 default one for the others. */
7254 m68k_c_mode_for_floating_type (enum tree_index ti
)
7256 if (ti
== TI_LONG_DOUBLE_TYPE
)
7257 return LONG_DOUBLE_TYPE_MODE
;
7258 return default_mode_for_floating_type (ti
);
7261 /* Implement TARGET_LRA_P. */
7269 #include "gt-m68k.h"