[PR testsuite/116860] Testsuite adjustment for recently added tests
[official-gcc.git] / gcc / config / m68k / m68k.cc
blob d8fa6e00de1d6c2ca7fc40b40c129fdddafd2c1c
1 /* Subroutines for insn-output.cc for Motorola 68000 family.
2 Copyright (C) 1987-2025 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
22 #include "config.h"
23 #define INCLUDE_STRING
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "cfghooks.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "attribs.h"
31 #include "rtl.h"
32 #include "df.h"
33 #include "alias.h"
34 #include "fold-const.h"
35 #include "calls.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "regs.h"
39 #include "insn-config.h"
40 #include "conditions.h"
41 #include "output.h"
42 #include "insn-attr.h"
43 #include "recog.h"
44 #include "diagnostic-core.h"
45 #include "flags.h"
46 #include "expmed.h"
47 #include "dojump.h"
48 #include "explow.h"
49 #include "memmodel.h"
50 #include "emit-rtl.h"
51 #include "stmt.h"
52 #include "expr.h"
53 #include "reload.h"
54 #include "tm_p.h"
55 #include "target.h"
56 #include "debug.h"
57 #include "cfgrtl.h"
58 #include "cfganal.h"
59 #include "lcm.h"
60 #include "cfgbuild.h"
61 #include "cfgcleanup.h"
62 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
63 #include "sched-int.h"
64 #include "insn-codes.h"
65 #include "opts.h"
66 #include "optabs.h"
67 #include "builtins.h"
68 #include "rtl-iter.h"
69 #include "toplev.h"
71 /* This file should be included last. */
72 #include "target-def.h"
/* Class of each hard register, indexed by register number: eight data
   registers (d0-d7), eight address registers (a0-a7), eight FPU
   registers (fp0-fp7), plus one trailing ADDR_REGS entry — presumably
   the fake argument-pointer register; confirm against the register
   numbering in m68k.h.  */
74 enum reg_class regno_reg_class[] =
76 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
77 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
78 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
79 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
80 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
81 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
82 ADDR_REGS
86 /* The minimum number of integer registers that we want to save with the
87 movem instruction. Using two movel instructions instead of a single
88 moveml is about 15% faster for the 68020 and 68030 at no expense in
89 code size. */
90 #define MIN_MOVEM_REGS 3
92 /* The minimum number of floating point registers that we want to save
93 with the fmovem instruction. */
94 #define MIN_FMOVEM_REGS 1
96 /* Structure describing stack frame layout.
   All fields are filled in by m68k_compute_frame_layout and cached in
   `current_frame', keyed on FUNCDEF_NO.  */
97 struct m68k_frame
99 /* Stack pointer to frame pointer offset. */
100 HOST_WIDE_INT offset;
102 /* Offset of FPU registers. */
103 HOST_WIDE_INT foffset;
105 /* Frame size in bytes (rounded up). */
106 HOST_WIDE_INT size;
108 /* Data and address register.  REG_NO is the number of d/a registers
   to be saved; REG_MASK has bit N set when register D0_REG + N is
   saved.  */
109 int reg_no;
110 unsigned int reg_mask;
112 /* FPU registers.  Same scheme as above, with bits relative to
   FP0_REG.  */
113 int fpu_no;
114 unsigned int fpu_mask;
116 /* Offsets relative to ARG_POINTER. */
117 HOST_WIDE_INT frame_pointer_offset;
118 HOST_WIDE_INT stack_pointer_offset;
120 /* Function which the above information refers to; acts as the cache
   key checked by m68k_compute_frame_layout. */
121 int funcdef_no;
124 /* Current frame information calculated by m68k_compute_frame_layout(). */
125 static struct m68k_frame current_frame;
127 /* Structure describing an m68k address.
129 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
130 with null fields evaluating to 0. Here:
132 - BASE satisfies m68k_legitimate_base_reg_p
133 - INDEX satisfies m68k_legitimate_index_reg_p
134 - OFFSET satisfies m68k_legitimate_constant_address_p
136 INDEX is either HImode or SImode. The other fields are SImode.
138 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
139 the address is (BASE)+. */
140 struct m68k_address {
141 enum rtx_code code;  /* UNKNOWN, PRE_DEC or POST_INC — see above.  */
142 rtx base;  /* Base register, or NULL.  */
143 rtx index;  /* Index register, or NULL.  */
144 rtx offset;  /* Constant displacement, or NULL.  */
145 int scale;  /* Multiplier applied to INDEX.  */
148 static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
149 unsigned int);
150 static int m68k_sched_issue_rate (void);
151 static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
152 static void m68k_sched_md_init_global (FILE *, int, int);
153 static void m68k_sched_md_finish_global (FILE *, int);
154 static void m68k_sched_md_init (FILE *, int, int);
155 static void m68k_sched_dfa_pre_advance_cycle (void);
156 static void m68k_sched_dfa_post_advance_cycle (void);
157 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
159 static bool m68k_can_eliminate (const int, const int);
160 static void m68k_conditional_register_usage (void);
161 static bool m68k_legitimate_address_p (machine_mode, rtx, bool,
162 code_helper = ERROR_MARK);
163 static void m68k_option_override (void);
164 static void m68k_override_options_after_change (void);
165 static rtx find_addr_reg (rtx);
166 static const char *singlemove_string (rtx *);
167 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
168 HOST_WIDE_INT, tree);
169 static rtx m68k_struct_value_rtx (tree, int);
170 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
171 tree args, int flags,
172 bool *no_add_attrs);
173 static void m68k_compute_frame_layout (void);
174 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
175 static bool m68k_ok_for_sibcall_p (tree, tree);
176 static bool m68k_tls_symbol_p (rtx);
177 static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
178 static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
179 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
180 static bool m68k_return_in_memory (const_tree, const_tree);
181 #endif
182 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
183 static void m68k_trampoline_init (rtx, tree, rtx);
184 static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
185 static rtx m68k_delegitimize_address (rtx);
186 static void m68k_function_arg_advance (cumulative_args_t,
187 const function_arg_info &);
188 static rtx m68k_function_arg (cumulative_args_t, const function_arg_info &);
189 static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
190 static bool m68k_output_addr_const_extra (FILE *, rtx);
191 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
192 static enum flt_eval_method
193 m68k_excess_precision (enum excess_precision_type);
194 static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
195 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
196 static bool m68k_modes_tieable_p (machine_mode, machine_mode);
197 static machine_mode m68k_promote_function_mode (const_tree, machine_mode,
198 int *, const_tree, int);
199 static void m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int);
200 static HARD_REG_SET m68k_zero_call_used_regs (HARD_REG_SET);
201 static machine_mode m68k_c_mode_for_floating_type (enum tree_index);
202 static bool m68k_use_lra_p (void);
204 /* Initialize the GCC target structure. */
206 #if INT_OP_GROUP == INT_OP_DOT_WORD
207 #undef TARGET_ASM_ALIGNED_HI_OP
208 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
209 #endif
211 #if INT_OP_GROUP == INT_OP_NO_DOT
212 #undef TARGET_ASM_BYTE_OP
213 #define TARGET_ASM_BYTE_OP "\tbyte\t"
214 #undef TARGET_ASM_ALIGNED_HI_OP
215 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
216 #undef TARGET_ASM_ALIGNED_SI_OP
217 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
218 #endif
220 #if INT_OP_GROUP == INT_OP_DC
221 #undef TARGET_ASM_BYTE_OP
222 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
223 #undef TARGET_ASM_ALIGNED_HI_OP
224 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
225 #undef TARGET_ASM_ALIGNED_SI_OP
226 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
227 #endif
229 #undef TARGET_ASM_UNALIGNED_HI_OP
230 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
231 #undef TARGET_ASM_UNALIGNED_SI_OP
232 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
234 #undef TARGET_ASM_OUTPUT_MI_THUNK
235 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
236 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
237 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
239 #undef TARGET_ASM_FILE_START_APP_OFF
240 #define TARGET_ASM_FILE_START_APP_OFF true
242 #undef TARGET_LEGITIMIZE_ADDRESS
243 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
245 #undef TARGET_SCHED_ADJUST_COST
246 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
248 #undef TARGET_SCHED_ISSUE_RATE
249 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
251 #undef TARGET_SCHED_VARIABLE_ISSUE
252 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
254 #undef TARGET_SCHED_INIT_GLOBAL
255 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
257 #undef TARGET_SCHED_FINISH_GLOBAL
258 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
260 #undef TARGET_SCHED_INIT
261 #define TARGET_SCHED_INIT m68k_sched_md_init
263 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
264 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
266 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
267 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
269 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
270 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
271 m68k_sched_first_cycle_multipass_dfa_lookahead
273 #undef TARGET_OPTION_OVERRIDE
274 #define TARGET_OPTION_OVERRIDE m68k_option_override
276 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
277 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change
279 #undef TARGET_RTX_COSTS
280 #define TARGET_RTX_COSTS m68k_rtx_costs
282 #undef TARGET_ATTRIBUTE_TABLE
283 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
285 #undef TARGET_PROMOTE_PROTOTYPES
286 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
288 #undef TARGET_STRUCT_VALUE_RTX
289 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
291 #undef TARGET_CANNOT_FORCE_CONST_MEM
292 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem
294 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
295 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
297 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
298 #undef TARGET_RETURN_IN_MEMORY
299 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
300 #endif
302 #ifdef HAVE_AS_TLS
303 #undef TARGET_HAVE_TLS
304 #define TARGET_HAVE_TLS (true)
306 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
307 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
308 #endif
310 #undef TARGET_LRA_P
311 #define TARGET_LRA_P m68k_use_lra_p
313 #undef TARGET_LEGITIMATE_ADDRESS_P
314 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
316 #undef TARGET_CAN_ELIMINATE
317 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
319 #undef TARGET_CONDITIONAL_REGISTER_USAGE
320 #define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage
322 #undef TARGET_TRAMPOLINE_INIT
323 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
325 #undef TARGET_RETURN_POPS_ARGS
326 #define TARGET_RETURN_POPS_ARGS m68k_return_pops_args
328 #undef TARGET_DELEGITIMIZE_ADDRESS
329 #define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address
331 #undef TARGET_FUNCTION_ARG
332 #define TARGET_FUNCTION_ARG m68k_function_arg
334 #undef TARGET_FUNCTION_ARG_ADVANCE
335 #define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance
337 #undef TARGET_LEGITIMATE_CONSTANT_P
338 #define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p
340 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
341 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra
343 #undef TARGET_C_EXCESS_PRECISION
344 #define TARGET_C_EXCESS_PRECISION m68k_excess_precision
346 /* The value stored by TAS. */
347 #undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
348 #define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128
350 #undef TARGET_HARD_REGNO_NREGS
351 #define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
352 #undef TARGET_HARD_REGNO_MODE_OK
353 #define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok
355 #undef TARGET_MODES_TIEABLE_P
356 #define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p
358 #undef TARGET_PROMOTE_FUNCTION_MODE
359 #define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode
361 #undef TARGET_HAVE_SPECULATION_SAFE_VALUE
362 #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
364 #undef TARGET_ASM_FINAL_POSTSCAN_INSN
365 #define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn
367 #undef TARGET_ZERO_CALL_USED_REGS
368 #define TARGET_ZERO_CALL_USED_REGS m68k_zero_call_used_regs
370 #undef TARGET_C_MODE_FOR_FLOATING_TYPE
371 #define TARGET_C_MODE_FOR_FLOATING_TYPE m68k_c_mode_for_floating_type
/* Machine-specific function attributes.  All three take no arguments,
   require a FUNCTION_DECL, and are validated by
   m68k_handle_fndecl_attribute below.  */
373 TARGET_GNU_ATTRIBUTES (m68k_attribute_table,
375 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
376 affects_type_identity, handler, exclude } */
377 { "interrupt", 0, 0, true, false, false, false,
378 m68k_handle_fndecl_attribute, NULL },
379 { "interrupt_handler", 0, 0, true, false, false, false,
380 m68k_handle_fndecl_attribute, NULL },
381 { "interrupt_thread", 0, 0, true, false, false, false,
382 m68k_handle_fndecl_attribute, NULL }
385 #undef TARGET_DOCUMENTATION_NAME
386 #define TARGET_DOCUMENTATION_NAME "m68k"
388 struct gcc_target targetm = TARGET_INITIALIZER;
390 /* Base flags for 68k ISAs. */
391 #define FL_FOR_isa_00 FL_ISA_68000
392 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
393 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
394 generated 68881 code for 68020 and 68030 targets unless explicitly told
395 not to. */
396 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
397 | FL_BITFIELD | FL_68881 | FL_CAS)
398 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
399 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
401 /* Base flags for ColdFire ISAs. */
402 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
403 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
404 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
405 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
406 /* ISA_C is not upwardly compatible with ISA_B. */
407 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
/* Instruction-set identifiers, used to index the FL_FOR_isa_* flag
   sets and the tables built from m68k-devices.def and friends.  */
409 enum m68k_isa
411 /* Traditional 68000 instruction sets. */
412 isa_00,
413 isa_10,
414 isa_20,
415 isa_40,
416 isa_cpu32,
417 /* ColdFire instruction set variants. */
418 isa_a,
419 isa_aplus,
420 isa_b,
421 isa_c,
422 /* Sentinel; also used in the terminating entries of the selection
   tables below. */
423 isa_max
425 /* Information about one of the -march, -mcpu or -mtune arguments. */
426 struct m68k_target_selection
428 /* The argument being described. */
429 const char *name;
431 /* For -mcpu, this is the device selected by the option.
432 For -mtune and -march, it is a representative device
433 for the microarchitecture or ISA respectively. */
434 enum target_device device;
436 /* The M68K_DEVICE fields associated with DEVICE. See the comment
437 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
438 const char *family;
439 enum uarch_type microarch;
440 enum m68k_isa isa;
/* Union of FL_* capability bits for this selection.  */
441 unsigned long flags;
444 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
445 static const struct m68k_target_selection all_devices[] =
/* Each device entry ORs the base flags of its ISA into FLAGS.  */
447 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
448 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
449 #include "m68k-devices.def"
450 #undef M68K_DEVICE
/* Sentinel terminating the table.  */
451 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
454 /* A list of all ISAs, mapping each one to a representative device.
455 Used for -march selection. */
456 static const struct m68k_target_selection all_isas[] =
458 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
459 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
460 #include "m68k-isas.def"
461 #undef M68K_ISA
/* Sentinel terminating the table.  */
462 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
465 /* A list of all microarchitectures, mapping each one to a representative
466 device. Used for -mtune selection. */
467 static const struct m68k_target_selection all_microarchs[] =
469 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
470 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
471 #include "m68k-microarchs.def"
472 #undef M68K_MICROARCH
/* Sentinel terminating the table.  */
473 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
476 /* The entries associated with the -mcpu, -march and -mtune settings,
477 or null for options that have not been used. */
478 const struct m68k_target_selection *m68k_cpu_entry;
479 const struct m68k_target_selection *m68k_arch_entry;
480 const struct m68k_target_selection *m68k_tune_entry;
/* All of the variables below are derived from the three entries above
   by m68k_option_override.  */
482 /* Which CPU we are generating code for. */
483 enum target_device m68k_cpu;
485 /* Which microarchitecture to tune for. */
486 enum uarch_type m68k_tune;
488 /* Which FPU to use. */
489 enum fpu_type m68k_fpu;
491 /* The set of FL_* flags that apply to the target processor. */
492 unsigned int m68k_cpu_flags;
494 /* The set of FL_* flags that apply to the processor to be tuned for. */
495 unsigned int m68k_tune_flags;
497 /* Asm templates for calling or jumping to an arbitrary symbolic address,
498 or NULL if such calls or jumps are not supported. The address is held
499 in operand 0. */
500 const char *m68k_symbolic_call;
501 const char *m68k_symbolic_jump;
503 /* Enum variable that corresponds to m68k_symbolic_call values. */
504 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
507 /* Implement TARGET_OPTION_OVERRIDE. */
509 static void
510 m68k_option_override (void)
512 const struct m68k_target_selection *entry;
513 unsigned long target_mask;
/* Resolve the -march/-mcpu/-mtune command-line enums into table
   entries.  */
515 if (OPTION_SET_P (m68k_arch_option))
516 m68k_arch_entry = &all_isas[m68k_arch_option];
518 if (OPTION_SET_P (m68k_cpu_option))
519 m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];
521 if (OPTION_SET_P (m68k_tune_option))
522 m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];
524 /* User can choose:
526 -mcpu=
527 -march=
528 -mtune=
530 -march=ARCH should generate code that runs any processor
531 implementing architecture ARCH. -mcpu=CPU should override -march
532 and should generate code that runs on processor CPU, making free
533 use of any instructions that CPU understands. -mtune=UARCH applies
534 on top of -mcpu or -march and optimizes the code for UARCH. It does
535 not change the target architecture. */
536 if (m68k_cpu_entry)
538 /* Complain if the -march setting is for a different microarchitecture,
539 or includes flags that the -mcpu setting doesn't. */
540 if (m68k_arch_entry
541 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
542 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
543 warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
544 m68k_cpu_entry->name, m68k_arch_entry->name);
546 entry = m68k_cpu_entry;
548 else
549 entry = m68k_arch_entry;
/* Neither -mcpu nor -march given: use the configured default CPU.  */
551 if (!entry)
552 entry = all_devices + TARGET_CPU_DEFAULT;
554 m68k_cpu_flags = entry->flags;
556 /* Use the architecture setting to derive default values for
557 certain flags. */
558 target_mask = 0;
560 /* ColdFire is lenient about alignment. */
561 if (!TARGET_COLDFIRE)
562 target_mask |= MASK_STRICT_ALIGNMENT;
564 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
565 target_mask |= MASK_BITFIELD;
566 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
567 target_mask |= MASK_CF_HWDIV;
568 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
569 target_mask |= MASK_HARD_FLOAT;
/* Only apply the derived defaults where the user did not set the flag
   explicitly on the command line.  */
570 target_flags |= target_mask & ~target_flags_explicit;
572 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
573 m68k_cpu = entry->device;
574 if (m68k_tune_entry)
576 m68k_tune = m68k_tune_entry->microarch;
577 m68k_tune_flags = m68k_tune_entry->flags;
579 #ifdef M68K_DEFAULT_TUNE
580 else if (!m68k_cpu_entry && !m68k_arch_entry)
582 enum target_device dev;
583 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
584 m68k_tune_flags = all_devices[dev].flags;
586 #endif
587 else
589 m68k_tune = entry->microarch;
590 m68k_tune_flags = entry->flags;
593 /* Set the type of FPU. */
594 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
595 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
596 : FPUTYPE_68881);
598 /* Sanity check to ensure that msep-data and mid-shared-library are not
599 * both specified together. Doing so simply doesn't make sense.
601 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
602 error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");
604 /* If we're generating code for a separate A5 relative data segment,
605 * we've got to enable -fPIC as well. This might be relaxable to
606 * -fpic but it hasn't been tested properly.
608 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
609 flag_pic = 2;
611 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
612 error if the target does not support them. */
613 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
614 error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu");
616 /* ??? A historic way of turning on pic, or is this intended to
617 be an embedded thing that doesn't have the same name binding
618 significance that it does on hosted ELF systems? */
619 if (TARGET_PCREL && flag_pic == 0)
620 flag_pic = 1;
/* Choose the asm templates for symbolic calls and jumps according to
   the PIC model and the ISA's branch reach.  */
622 if (!flag_pic)
624 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
626 m68k_symbolic_jump = "jra %a0";
628 else if (TARGET_ID_SHARED_LIBRARY)
629 /* All addresses must be loaded from the GOT. */
631 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
633 if (TARGET_PCREL)
634 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
635 else
636 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
638 if (TARGET_ISAC)
639 /* No unconditional long branch */;
640 else if (TARGET_PCREL)
641 m68k_symbolic_jump = "bra%.l %c0";
642 else
643 m68k_symbolic_jump = "bra%.l %p0";
644 /* Turn off function cse if we are doing PIC. We always want
645 function call to be done as `bsr foo@PLTPC'. */
646 /* ??? It's traditional to do this for -mpcrel too, but it isn't
647 clear how intentional that is. */
648 flag_no_function_cse = 1;
/* Translate the chosen call variant into its asm template.  */
651 switch (m68k_symbolic_call_var)
653 case M68K_SYMBOLIC_CALL_JSR:
654 m68k_symbolic_call = "jsr %a0";
655 break;
657 case M68K_SYMBOLIC_CALL_BSR_C:
658 m68k_symbolic_call = "bsr%.l %c0";
659 break;
661 case M68K_SYMBOLIC_CALL_BSR_P:
662 m68k_symbolic_call = "bsr%.l %p0";
663 break;
665 case M68K_SYMBOLIC_CALL_NONE:
666 gcc_assert (m68k_symbolic_call == NULL);
667 break;
669 default:
670 gcc_unreachable ();
/* Without a way to pad with nops, alignments greater than 2 cannot be
   honoured; downgrade them with a warning.  */
673 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
674 parse_alignment_opts ();
675 int label_alignment = align_labels.levels[0].get_value ();
676 if (label_alignment > 2)
678 warning (0, "%<-falign-labels=%d%> is not supported", label_alignment);
679 str_align_labels = "1";
682 int loop_alignment = align_loops.levels[0].get_value ();
683 if (loop_alignment > 2)
685 warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment);
686 str_align_loops = "1";
688 #endif
690 if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
691 && !TARGET_68020)
693 warning (0, "%<-fstack-limit-%> options are not supported on this cpu");
694 opt_fstack_limit_symbol_arg = NULL;
695 opt_fstack_limit_register_no = -1;
698 SUBTARGET_OVERRIDE_OPTIONS;
700 /* Setup scheduling options. */
701 if (TUNE_CFV1)
702 m68k_sched_cpu = CPU_CFV1;
703 else if (TUNE_CFV2)
704 m68k_sched_cpu = CPU_CFV2;
705 else if (TUNE_CFV3)
706 m68k_sched_cpu = CPU_CFV3;
707 else if (TUNE_CFV4)
708 m68k_sched_cpu = CPU_CFV4;
709 else
/* No scheduling model for this CPU: disable every pass that needs
   one (kept in sync with m68k_override_options_after_change).  */
711 m68k_sched_cpu = CPU_UNKNOWN;
712 flag_schedule_insns = 0;
713 flag_schedule_insns_after_reload = 0;
714 flag_modulo_sched = 0;
715 flag_live_range_shrinkage = 0;
718 if (m68k_sched_cpu != CPU_UNKNOWN)
720 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
721 m68k_sched_mac = MAC_CF_EMAC;
722 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
723 m68k_sched_mac = MAC_CF_MAC;
724 else
725 m68k_sched_mac = MAC_NO;
729 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
731 static void
732 m68k_override_options_after_change (void)
734 if (m68k_sched_cpu == CPU_UNKNOWN)
736 flag_schedule_insns = 0;
737 flag_schedule_insns_after_reload = 0;
738 flag_modulo_sched = 0;
739 flag_live_range_shrinkage = 0;
743 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
744 given argument and NAME is the argument passed to -mcpu. Return NULL
745 if -mcpu was not passed. */
747 const char *
748 m68k_cpp_cpu_ident (const char *prefix)
750 if (!m68k_cpu_entry)
751 return NULL;
752 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
755 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
756 given argument and NAME is the name of the representative device for
757 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
759 const char *
760 m68k_cpp_cpu_family (const char *prefix)
762 if (!m68k_cpu_entry)
763 return NULL;
764 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
767 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
768 "interrupt_handler" attribute and interrupt_thread if FUNC has an
769 "interrupt_thread" attribute. Otherwise, return
770 m68k_fk_normal_function. */
772 enum m68k_function_kind
773 m68k_get_function_kind (tree func)
775 tree a;
777 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
779 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
780 if (a != NULL_TREE)
781 return m68k_fk_interrupt_handler;
783 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
784 if (a != NULL_TREE)
785 return m68k_fk_interrupt_handler;
787 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
788 if (a != NULL_TREE)
789 return m68k_fk_interrupt_thread;
791 return m68k_fk_normal_function;
794 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
795 struct attribute_spec.handler. */
796 static tree
797 m68k_handle_fndecl_attribute (tree *node, tree name,
798 tree args ATTRIBUTE_UNUSED,
799 int flags ATTRIBUTE_UNUSED,
800 bool *no_add_attrs)
/* Reject the attribute on anything that is not a function.  */
802 if (TREE_CODE (*node) != FUNCTION_DECL)
804 warning (OPT_Wattributes, "%qE attribute only applies to functions",
805 name);
806 *no_add_attrs = true;
/* Only one of the interrupt-family attributes may be present.  */
809 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
811 error ("multiple interrupt attributes not allowed");
812 *no_add_attrs = true;
/* "interrupt_thread" is a fido-only feature.  */
815 if (!TARGET_FIDOA
816 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
818 error ("%<interrupt_thread%> is available only on fido");
819 *no_add_attrs = true;
822 return NULL_TREE;
/* Fill in `current_frame' with the stack-frame layout of the current
   function: frame size, which data/address and FP registers need
   saving, and the resulting save-area offsets.  The result is cached
   per funcdef_no once reload has completed.  */
825 static void
826 m68k_compute_frame_layout (void)
828 int regno, saved;
829 unsigned int mask;
830 enum m68k_function_kind func_kind =
831 m68k_get_function_kind (current_function_decl);
832 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
833 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
835 /* Only compute the frame once per function.
836 Don't cache information until reload has been completed. */
837 if (current_frame.funcdef_no == current_function_funcdef_no
838 && reload_completed)
839 return;
/* Round the frame size up to a multiple of 4 bytes.  */
841 current_frame.size = (get_frame_size () + 3) & -4;
843 mask = saved = 0;
845 /* Interrupt thread does not need to save any register. */
846 if (!interrupt_thread)
847 for (regno = 0; regno < 16; regno++)
848 if (m68k_save_reg (regno, interrupt_handler))
850 mask |= 1 << (regno - D0_REG);
851 saved++;
853 current_frame.offset = saved * 4;
854 current_frame.reg_no = saved;
855 current_frame.reg_mask = mask;
/* Now do the same scan for the FP registers (regno 16-23).  */
857 current_frame.foffset = 0;
858 mask = saved = 0;
859 if (TARGET_HARD_FLOAT)
861 /* Interrupt thread does not need to save any register. */
862 if (!interrupt_thread)
863 for (regno = 16; regno < 24; regno++)
864 if (m68k_save_reg (regno, interrupt_handler))
866 mask |= 1 << (regno - FP0_REG);
867 saved++;
869 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
870 current_frame.offset += current_frame.foffset;
872 current_frame.fpu_no = saved;
873 current_frame.fpu_mask = mask;
875 /* Remember what function this frame refers to. */
876 current_frame.funcdef_no = current_function_funcdef_no;
879 /* Worker function for TARGET_CAN_ELIMINATE. */
881 bool
882 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
884 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
/* Return the offset to add when eliminating register FROM in favour of
   register TO; used by the ELIMINABLE_REGS machinery.  TO must be the
   frame pointer or the stack pointer.  */
887 HOST_WIDE_INT
888 m68k_initial_elimination_offset (int from, int to)
890 int argptr_offset;
891 /* The arg pointer points 8 bytes before the start of the arguments,
892 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
893 frame pointer in most frames. */
894 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
895 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
896 return argptr_offset;
/* All remaining cases need the saved-register layout.  */
898 m68k_compute_frame_layout ();
900 gcc_assert (to == STACK_POINTER_REGNUM);
901 switch (from)
903 case ARG_POINTER_REGNUM:
904 return current_frame.offset + current_frame.size - argptr_offset;
905 case FRAME_POINTER_REGNUM:
906 return current_frame.offset + current_frame.size;
907 default:
908 gcc_unreachable ();
912 /* Refer to the array `regs_ever_live' to determine which registers
913 to save; `regs_ever_live[I]' is nonzero if register number I
914 is ever used in the function. This function is responsible for
915 knowing which registers should not be saved even if used.
916 Return true if we need to save REGNO. */
918 static bool
919 m68k_save_reg (unsigned int regno, bool interrupt_handler)
/* The PIC register must be preserved whenever anything may need the
   GOT or the constant pool.  */
921 if (flag_pic && regno == PIC_REG)
923 if (crtl->saves_all_registers)
924 return true;
925 if (crtl->uses_pic_offset_table)
926 return true;
927 /* Reload may introduce constant pool references into a function
928 that thitherto didn't need a PIC register. Note that the test
929 above will not catch that case because we will only set
930 crtl->uses_pic_offset_table when emitting
931 the address reloads. */
932 if (crtl->uses_const_pool)
933 return true;
/* EH return data registers must survive to the landing pad.  */
936 if (crtl->calls_eh_return)
938 unsigned int i;
939 for (i = 0; ; i++)
941 unsigned int test = EH_RETURN_DATA_REGNO (i);
942 if (test == INVALID_REGNUM)
943 break;
944 if (test == regno)
945 return true;
949 /* Fixed regs we never touch. */
950 if (fixed_regs[regno])
951 return false;
953 /* The frame pointer (if it is such) is handled specially. */
954 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
955 return false;
957 /* Interrupt handlers must also save call_used_regs
958 if they are live or when calling nested functions. */
959 if (interrupt_handler)
961 if (df_regs_ever_live_p (regno))
962 return true;
964 if (!crtl->is_leaf && call_used_or_fixed_reg_p (regno))
965 return true;
968 /* Never need to save registers that aren't touched. */
969 if (!df_regs_ever_live_p (regno))
970 return false;
972 /* Otherwise save everything that isn't call-clobbered. */
973 return !call_used_or_fixed_reg_p (regno);
976 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
977 the lowest memory address. COUNT is the number of registers to be
978 moved, with register REGNO + I being moved if bit I of MASK is set.
979 STORE_P specifies the direction of the move and ADJUST_STACK_P says
980 whether or not this is pre-decrement (if STORE_P) or post-increment
981 (if !STORE_P) operation.
   Returns the emitted insn.  */
983 static rtx_insn *
984 m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
985 unsigned int count, unsigned int regno,
986 unsigned int mask, bool store_p, bool adjust_stack_p)
988 int i;
989 rtx body, addr, src, operands[2];
990 machine_mode mode;
/* One PARALLEL element per moved register, plus one for the optional
   stack adjustment.  */
992 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
993 mode = reg_raw_mode[regno];
994 i = 0;
/* Element 0 (if present) updates BASE by the total size moved:
   downwards for a store (pre-decrement), upwards for a load.  */
996 if (adjust_stack_p)
998 src = plus_constant (Pmode, base,
999 (count
1000 * GET_MODE_SIZE (mode)
1001 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
1002 XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
/* One SET per selected register; operand order encodes direction.  */
1005 for (; mask != 0; mask >>= 1, regno++)
1006 if (mask & 1)
1008 addr = plus_constant (Pmode, base, offset);
1009 operands[!store_p] = gen_frame_mem (mode, addr);
1010 operands[store_p] = gen_rtx_REG (mode, regno);
1011 XVECEXP (body, 0, i++)
1012 = gen_rtx_SET (operands[0], operands[1]);
1013 offset += GET_MODE_SIZE (mode);
1015 gcc_assert (i == XVECLEN (body, 0));
1017 return emit_insn (body);
1020 /* Make INSN a frame-related instruction. */
1022 static void
1023 m68k_set_frame_related (rtx_insn *insn)
1025 rtx body;
1026 int i;
1028 RTX_FRAME_RELATED_P (insn) = 1;
1029 body = PATTERN (insn);
1030 if (GET_CODE (body) == PARALLEL)
1031 for (i = 0; i < XVECLEN (body, 0); i++)
1032 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
/* Emit RTL for the "prologue" define_expand.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      /* +4 accounts for the return address already pushed by the call.  */
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* Load the limit into D0 first if it cannot be used directly.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* link.w (or link.l on 68020+) handles the whole frame setup;
	   -4 covers the saved frame pointer itself.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too big for link.w's 16-bit displacement: link with a
	     zero-size frame and adjust the stack pointer separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem supports pre-decrement; save below the current SP.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));
      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1191 /* Return true if a simple (return) instruction is sufficient for this
1192 instruction (i.e. if no epilogue is needed). */
1194 bool
1195 m68k_use_return_insn (void)
1197 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1198 return false;
1200 m68k_compute_frame_layout ();
1201 return current_frame.offset == 0;
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Frames whose save-area offsets overflow a 16-bit displacement need
     special handling: either switch to SP-relative restores (ColdFire)
     or compute the offset into A1 and use (d8,%fp,%a1.l) addressing.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the final stack
     adjustment / unlink.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1376 /* Return true if PARALLEL contains register REGNO. */
1377 static bool
1378 m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1380 int i;
1382 if (REG_P (parallel) && REGNO (parallel) == regno)
1383 return true;
1385 if (GET_CODE (parallel) != PARALLEL)
1386 return false;
1388 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1390 const_rtx x;
1392 x = XEXP (XVECEXP (parallel, 0, i), 0);
1393 if (REG_P (x) && REGNO (x) == regno)
1394 return true;
1397 return false;
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}
/* On the m68k all args are always pushed.  */

static rtx
m68k_function_arg (cumulative_args_t, const function_arg_info &)
{
  /* NULL_RTX tells the middle-end that the argument is passed on the
     stack rather than in a register.  */
  return NULL_RTX;
}
1454 static void
1455 m68k_function_arg_advance (cumulative_args_t cum_v,
1456 const function_arg_info &arg)
1458 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1460 *cum += (arg.promoted_size_in_bytes () + 3) & ~3;
1463 /* Convert X to a legitimate function call memory reference and return the
1464 result. */
1467 m68k_legitimize_call_address (rtx x)
1469 gcc_assert (MEM_P (x));
1470 if (call_operand (XEXP (x, 0), VOIDmode))
1471 return x;
1472 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1475 /* Likewise for sibling calls. */
1478 m68k_legitimize_sibcall_address (rtx x)
1480 gcc_assert (MEM_P (x));
1481 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1482 return x;
1484 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1485 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH records whether X has been changed (either by our caller,
	 when X != OLDX, or by one of the transformations below);
	 COPIED records whether we have already unshared X.  */
      int ch = (x) != (oldx);
      int copied = 0;

/* Unshare X before the first in-place modification, so we never mutate
   RTL that may be shared with the caller.  */
#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      /* Break out a scaled index on either side of the PLUS.  */
      if (GET_CODE (XEXP (x, 0)) == MULT
	  || GET_CODE (XEXP (x, 0)) == ASHIFT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT
	  || GET_CODE (XEXP (x, 1)) == ASHIFT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      /* REG+REG is a valid index form, except that ColdFire FPU
		 loads/stores cannot use it.  */
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}
      /* One side is a (possibly HImode sign-extended) register; force
	 the other side into a register too so indexing can be used.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
/* For eliding comparisons, we remember how the flags were set.
   FLAGS_COMPARE_OP0 and FLAGS_COMPARE_OP1 are remembered for a direct
   comparison, they take priority.  FLAGS_OPERAND1 and FLAGS_OPERAND2
   are used in more cases, they are a fallback for comparisons against
   zero after a move or arithmetic insn.
   FLAGS_VALID is set to FLAGS_VALID_NO if we should not use any of
   these values.  */

/* Operands of the last explicit comparison, if any.  */
static rtx flags_compare_op0, flags_compare_op1;
/* Destination (and possibly source) of the last flag-setting insn.  */
static rtx flags_operand1, flags_operand2;
/* How the last tracked insn set the flags; see the flags_valid insn
   attribute in m68k.md.  */
static attr_flags_valid flags_valid = FLAGS_VALID_NO;
/* Return a code other than UNKNOWN if we can elide a CODE comparison of
   OP0 with OP1.  */

rtx_code
m68k_find_flags_value (rtx op0, rtx op1, rtx_code code)
{
  /* A remembered explicit comparison takes priority.  */
  if (flags_compare_op0 != NULL_RTX)
    {
      if (rtx_equal_p (op0, flags_compare_op0)
	  && rtx_equal_p (op1, flags_compare_op1))
	return code;
      if (rtx_equal_p (op0, flags_compare_op1)
	  && rtx_equal_p (op1, flags_compare_op0))
	return swap_condition (code);
      return UNKNOWN;
    }

  /* The fallback operands only describe flags from a move/arithmetic
     insn, which are meaningful solely for comparisons against zero.  */
  machine_mode mode = GET_MODE (op0);
  if (op1 != CONST0_RTX (mode))
    return UNKNOWN;
  /* Comparisons against 0 with these two should have been optimized out.  */
  gcc_assert (code != LTU && code != GEU);
  /* When the overflow flag is unreliable, GT/LE cannot be elided.  */
  if (flags_valid == FLAGS_VALID_NOOV && (code == GT || code == LE))
    return UNKNOWN;
  if (rtx_equal_p (flags_operand1, op0) || rtx_equal_p (flags_operand2, op0))
    /* PLUS/MINUS are repurposed here to encode sign-flag (pl/mi)
       conditions for GE/LT against zero.  */
    return (FLOAT_MODE_P (mode) ? code
	    : code == GE ? PLUS : code == LT ? MINUS : code);
  /* See if we are testing whether the high part of a DImode value is
     positive or negative and we have the full value as a remembered
     operand.  */
  if (code != GE && code != LT)
    return UNKNOWN;
  if (mode == SImode
      && flags_operand1 != NULL_RTX && GET_MODE (flags_operand1) == DImode
      && REG_P (flags_operand1) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand1), DImode) == 2
      && REGNO (flags_operand1) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  if (mode == SImode
      && flags_operand2 != NULL_RTX && GET_MODE (flags_operand2) == DImode
      && REG_P (flags_operand2) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand2), DImode) == 2
      && REGNO (flags_operand2) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  return UNKNOWN;
}
1628 /* Called through CC_STATUS_INIT, which is invoked by final whenever a
1629 label is encountered. */
1631 void
1632 m68k_init_cc ()
1634 flags_compare_op0 = flags_compare_op1 = NULL_RTX;
1635 flags_operand1 = flags_operand2 = NULL_RTX;
1636 flags_valid = FLAGS_VALID_NO;
/* Update flags for a move operation with OPERANDS.  Called for move
   operations where attr_flags_valid returns "set".  */

static void
handle_flags_for_move (rtx *operands)
{
  /* Any move invalidates a remembered explicit comparison.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  if (!ADDRESS_REG_P (operands[0]))
    {
      flags_valid = FLAGS_VALID_MOVE;
      flags_operand1 = side_effects_p (operands[0]) ? NULL_RTX : operands[0];
      if (side_effects_p (operands[1])
	  /* ??? For mem->mem moves, this can discard the source as a
	     valid compare operand.  If you assume aligned moves, this
	     is unnecessary, but in theory, we could have an unaligned
	     move overwriting parts of its source.  */
	  || modified_in_p (operands[1], current_output_insn))
	flags_operand2 = NULL_RTX;
      else
	flags_operand2 = operands[1];
      return;
    }

  /* Move to an address register: flags are left untouched, but drop
     any remembered operand this insn overwrites.  */
  if (flags_operand1 != NULL_RTX
      && modified_in_p (flags_operand1, current_output_insn))
    flags_operand1 = NULL_RTX;
  if (flags_operand2 != NULL_RTX
      && modified_in_p (flags_operand2, current_output_insn))
    flags_operand2 = NULL_RTX;
}
/* Process INSN to remember flag operands if possible.  */

static void
m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int)
{
  enum attr_flags_valid v = get_attr_flags_valid (insn);
  if (v == FLAGS_VALID_SET)
    /* Comparisons and moves manage the tracked state themselves
       (see handle_flags_for_move and the compare expanders).  */
    return;
  /* Comparisons use FLAGS_VALID_SET, so we can be sure we need to clear these
     now.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;

  if (v == FLAGS_VALID_NO)
    {
      /* Insn clobbers the flags unpredictably; forget everything.  */
      flags_operand1 = flags_operand2 = NULL_RTX;
      return;
    }
  else if (v == FLAGS_VALID_UNCHANGED)
    {
      /* Flags survive, but drop remembered operands the insn modifies.  */
      if (flags_operand1 != NULL_RTX && modified_in_p (flags_operand1, insn))
	flags_operand1 = NULL_RTX;
      if (flags_operand2 != NULL_RTX && modified_in_p (flags_operand2, insn))
	flags_operand2 = NULL_RTX;
      return;
    }

  /* Remaining cases (YES/NOOV/MOVE) set the flags from a single SET.  */
  flags_valid = v;
  rtx set = single_set (insn);
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);
  if (side_effects_p (dest))
    dest = NULL_RTX;

  switch (v)
    {
    case FLAGS_VALID_YES:
    case FLAGS_VALID_NOOV:
      flags_operand1 = dest;
      flags_operand2 = NULL_RTX;
      break;
    case FLAGS_VALID_MOVE:
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      if (dest != NULL_RTX
	  && !FP_REG_P (dest)
	  && (FP_REG_P (src)
	      || GET_CODE (src) == FIX
	      || FLOAT_MODE_P (GET_MODE (dest))))
	flags_operand1 = flags_operand2 = NULL_RTX;
      else
	{
	  flags_operand1 = dest;
	  if (GET_MODE (src) != VOIDmode && !side_effects_p (src)
	      && !modified_in_p (src, insn))
	    flags_operand2 = src;
	  else
	    flags_operand2 = NULL_RTX;
	}
      break;
    default:
      gcc_unreachable ();
    }
  return;
}
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).
   OPERANDS are as in the two peepholes.  CODE is the code
   returned by m68k_output_branch_<mode>.  */

void
output_dbcc_and_branch (rtx *operands, rtx_code code)
{
  switch (code)
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    /* PLUS and MINUS encode sign-flag (pl/mi) tests; see
       m68k_find_flags_value.  */
    /* NOTE(review): unlike every other case, the jCC here does not
       match the dbCC condition (jle vs. dbpl/dbmi).  Verify against
       the m68k.md peepholes that emit PLUS/MINUS here — jpl/jmi would
       be the symmetric choice.  */
    case PLUS:
      output_asm_insn ("dbpl %0,%l1\n\tjle %l2", operands);
      break;

    case MINUS:
      output_asm_insn ("dbmi %0,%l1\n\tjle %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
/* Output assembly to set DEST from the DImode comparison OP of OPERAND1
   with OPERAND2, using a pair of SImode compares and an sCC.  Returns
   an empty template since all assembly is emitted directly.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* loperands[0]/[1] are the high/low words of OPERAND1,
     loperands[2]/[3] likewise for OPERAND2 (when nonzero),
     loperands[4]/[6] are branch labels, loperands[5] is DEST.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    /* Compare high words first; fall through to the low-word compare
       only when they are equal.  */
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* tst.l does not work on address registers before the 68020;
	 use cmp.w #0 there instead.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  /* Unsigned conditions can use the flags of the high-word compare
     directly; signed GT/LT/GE/LE need an unsigned sCC on the low-word
     path and a signed sCC on the high-word path.  */
  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
/* Output assembly to test bit COUNTOP of DATAOP for comparison CODE.
   SIGNPOS is the sign-bit position of the storage unit in use (7, 15
   or 31).  Returns the condition code to use for the following branch;
   PLUS/MINUS encode pl/mi (sign-flag) conditions.  */

rtx_code
m68k_output_btst (rtx countop, rtx dataop, rtx_code code, int signpos)
{
  rtx ops[2];
  ops[0] = countop;
  ops[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  ops[1] = dataop = adjust_address (dataop, QImode, offset);
	}

      if (code == EQ || code == NE)
	{
	  /* Testing the sign bit can be done with a plain tst; the
	     result is then in the N flag (pl/mi).  */
	  if (count == 31)
	    {
	      output_asm_insn ("tst%.l %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 15)
	    {
	      output_asm_insn ("tst%.w %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 7)
	    {
	      output_asm_insn ("tst%.b %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	}
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  /* Bit 3 of the CCR is the N flag, bit 2 the Z flag, so moving
	     the low word into the CCR makes those bits directly testable.  */
	  if (count == 3 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 2 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? NE : EQ;
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}
    }
  output_asm_insn ("btst %0,%1", ops);
  return code;
}
/* Output a bftst instruction for a zero_extract with ZXOP0, ZXOP1 and ZXOP2
   operands.  CODE is the code of the comparison, and we return the code to
   be actually used in the jump.  */

rtx_code
m68k_output_bftst (rtx zxop0, rtx zxop1, rtx zxop2, rtx_code code)
{
  /* A one-bit field extracted at a constant position is just a bit
     test; let m68k_output_btst pick the cheapest sequence.  */
  if (zxop1 == const1_rtx && GET_CODE (zxop2) == CONST_INT)
    {
      /* Bit numbering differs between registers (31..0) and memory
	 bytes (7..0).  */
      int width = GET_CODE (zxop0) == REG ? 31 : 7;
      /* Pass 1000 as SIGNPOS argument so that btst will
	 not think we are testing the sign bit for an `and'
	 and assume that nonzero implies a negative result.  */
      return m68k_output_btst (GEN_INT (width - INTVAL (zxop2)), zxop0, code, 1000);
    }
  rtx ops[3] = { zxop0, zxop1, zxop2 };
  output_asm_insn ("bftst %0{%b2:%b1}", ops);
  return code;
}
2035 /* Return true if X is a legitimate base register. STRICT_P says
2036 whether we need strict checking. */
2038 bool
2039 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
2041 /* Allow SUBREG everywhere we allow REG. This results in better code. */
2042 if (!strict_p && GET_CODE (x) == SUBREG)
2043 x = SUBREG_REG (x);
2045 return (REG_P (x)
2046 && (strict_p
2047 ? REGNO_OK_FOR_BASE_P (REGNO (x))
2048 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
2051 /* Return true if X is a legitimate index register. STRICT_P says
2052 whether we need strict checking. */
2054 bool
2055 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
2057 if (!strict_p && GET_CODE (x) == SUBREG)
2058 x = SUBREG_REG (x);
2060 return (REG_P (x)
2061 && (strict_p
2062 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
2063 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  Scaled indexing (x2/x4/x8) requires a
     68020+ or ColdFire; x8 on ColdFire additionally needs the FPU.  */
  scale = 1;
  if (TARGET_68020 || TARGET_COLDFIRE)
    {
      if (GET_CODE (x) == MULT
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 2
	      || INTVAL (XEXP (x, 1)) == 4
	      || (INTVAL (XEXP (x, 1)) == 8
		  && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
	{
	  scale = INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
      /* LRA uses ASHIFT instead of MULT outside of MEM.  */
      else if (GET_CODE (x) == ASHIFT
	       && GET_CODE (XEXP (x, 1)) == CONST_INT
	       && (INTVAL (XEXP (x, 1)) == 1
		   || INTVAL (XEXP (x, 1)) == 2
		   || (INTVAL (XEXP (x, 1)) == 3
		       && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
	{
	  scale = 1 << INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
    }

  /* Check for a word extension (Xn.w index form, not on ColdFire).  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
2118 /* Return true if X is an illegitimate symbolic constant. */
2120 bool
2121 m68k_illegitimate_symbolic_constant_p (rtx x)
2123 rtx base, offset;
2125 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
2127 split_const (x, &base, &offset);
2128 if (GET_CODE (base) == SYMBOL_REF
2129 && !offset_within_block_p (base, INTVAL (offset)))
2130 return true;
2132 return m68k_tls_reference_p (x, false);
2135 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2137 static bool
2138 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2140 return m68k_illegitimate_symbolic_constant_p (x);
2143 /* Return true if X is a legitimate constant address that can reach
2144 bytes in the range [X, X + REACH). STRICT_P says whether we need
2145 strict checking. */
2147 static bool
2148 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
2150 rtx base, offset;
2152 if (!CONSTANT_ADDRESS_P (x))
2153 return false;
2155 if (flag_pic
2156 && !(strict_p && TARGET_PCREL)
2157 && symbolic_operand (x, VOIDmode))
2158 return false;
2160 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
2162 split_const (x, &base, &offset);
2163 if (GET_CODE (base) == SYMBOL_REF
2164 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
2165 return false;
2168 return !m68k_tls_reference_p (x, false);
2171 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
2172 labels will become jump tables. */
2174 static bool
2175 m68k_jump_table_ref_p (rtx x)
2177 if (GET_CODE (x) != LABEL_REF)
2178 return false;
2180 rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
2181 if (!NEXT_INSN (insn) && !PREV_INSN (insn))
2182 return true;
2184 insn = next_nonnote_insn (insn);
2185 return insn && JUMP_TABLE_DATA_P (insn);
2188 /* Return true if X is a legitimate address for values of mode MODE.
2189 STRICT_P says whether strict checking is needed. If the address
2190 is valid, describe its components in *ADDRESS. */
2192 static bool
2193 m68k_decompose_address (machine_mode mode, rtx x,
2194 bool strict_p, struct m68k_address *address)
2196 unsigned int reach;
2198 memset (address, 0, sizeof (*address));
2200 if (mode == BLKmode)
2201 reach = 1;
2202 else
2203 reach = GET_MODE_SIZE (mode);
2205 /* Check for (An) (mode 2). */
2206 if (m68k_legitimate_base_reg_p (x, strict_p))
2208 address->base = x;
2209 return true;
2212 /* Check for -(An) and (An)+ (modes 3 and 4). */
2213 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
2214 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2216 address->code = GET_CODE (x);
2217 address->base = XEXP (x, 0);
2218 return true;
2221 /* Check for (d16,An) (mode 5). */
2222 if (GET_CODE (x) == PLUS
2223 && GET_CODE (XEXP (x, 1)) == CONST_INT
2224 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
2225 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
2227 address->base = XEXP (x, 0);
2228 address->offset = XEXP (x, 1);
2229 return true;
2232 /* Check for GOT loads. These are (bd,An,Xn) addresses if
2233 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
2234 addresses. */
2235 if (GET_CODE (x) == PLUS
2236 && XEXP (x, 0) == pic_offset_table_rtx)
2238 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
2239 they are invalid in this context. */
2240 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
2242 address->base = XEXP (x, 0);
2243 address->offset = XEXP (x, 1);
2244 return true;
2248 /* The ColdFire FPU only accepts addressing modes 2-5. */
2249 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
2250 return false;
2252 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
2253 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
2254 All these modes are variations of mode 7. */
2255 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2257 address->offset = x;
2258 return true;
2261 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2262 tablejumps.
2264 ??? do_tablejump creates these addresses before placing the target
2265 label, so we have to assume that unplaced labels are jump table
2266 references. It seems unlikely that we would ever generate indexed
2267 accesses to unplaced labels in other cases. Do not accept it in
2268 PIC mode, since the label address will need to be loaded from memory. */
2269 if (GET_CODE (x) == PLUS
2270 && !flag_pic
2271 && m68k_jump_table_ref_p (XEXP (x, 1))
2272 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2274 address->offset = XEXP (x, 1);
2275 return true;
2278 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2279 (bd,An,Xn.SIZE*SCALE) addresses. */
2281 if (TARGET_68020)
2283 /* Check for a nonzero base displacement. */
2284 if (GET_CODE (x) == PLUS
2285 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2287 address->offset = XEXP (x, 1);
2288 x = XEXP (x, 0);
2291 /* Check for a suppressed index register. */
2292 if (m68k_legitimate_base_reg_p (x, strict_p))
2294 address->base = x;
2295 return true;
2298 /* Check for a suppressed base register. Do not allow this case
2299 for non-symbolic offsets as it effectively gives gcc freedom
2300 to treat data registers as base registers, which can generate
2301 worse code. */
2302 if (address->offset
2303 && symbolic_operand (address->offset, VOIDmode)
2304 && m68k_decompose_index (x, strict_p, address))
2305 return true;
2307 else
2309 /* Check for a nonzero base displacement. */
2310 if (GET_CODE (x) == PLUS
2311 && GET_CODE (XEXP (x, 1)) == CONST_INT
2312 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
2314 address->offset = XEXP (x, 1);
2315 x = XEXP (x, 0);
2319 /* We now expect the sum of a base and an index. */
2320 if (GET_CODE (x) == PLUS)
2322 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2323 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2325 address->base = XEXP (x, 0);
2326 return true;
2329 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2330 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2332 address->base = XEXP (x, 1);
2333 return true;
2336 return false;
2339 /* Return true if X is a legitimate address for values of mode MODE.
2340 STRICT_P says whether strict checking is needed. */
2342 bool
2343 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p, code_helper)
2345 struct m68k_address address;
2347 return m68k_decompose_address (mode, x, strict_p, &address);
2350 /* Return true if X is a memory, describing its address in ADDRESS if so.
2351 Apply strict checking if called during or after reload. */
2353 static bool
2354 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2356 return (MEM_P (x)
2357 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2358 (reload_in_progress || lra_in_progress
2359 || reload_completed),
2360 address));
2363 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2365 bool
2366 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2368 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2371 /* Return true if X matches the 'Q' constraint. It must be a memory
2372 with a base address and no constant offset or index. */
2374 bool
2375 m68k_matches_q_p (rtx x)
2377 struct m68k_address address;
2379 return (m68k_legitimate_mem_p (x, &address)
2380 && address.code == UNKNOWN
2381 && address.base
2382 && !address.offset
2383 && !address.index);
2386 /* Return true if X matches the 'U' constraint. It must be a base address
2387 with a constant offset and no index. */
2389 bool
2390 m68k_matches_u_p (rtx x)
2392 struct m68k_address address;
2394 return (m68k_legitimate_mem_p (x, &address)
2395 && address.code == UNKNOWN
2396 && address.base
2397 && address.offset
2398 && !address.index);
2401 /* Return GOT pointer. */
2403 static rtx
2404 m68k_get_gp (void)
2406 if (pic_offset_table_rtx == NULL_RTX)
2407 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2409 crtl->uses_pic_offset_table = 1;
2411 return pic_offset_table_rtx;
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc
{
  RELOC_GOT,
  RELOC_TLSGD,
  RELOC_TLSLDM,
  RELOC_TLSLDO,
  RELOC_TLSIE,
  RELOC_TLSLE
};

/* True for every relocation kind except the (non-TLS) GOT one.  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2421 /* Wrap symbol X into unspec representing relocation RELOC.
2422 BASE_REG - register that should be added to the result.
2423 TEMP_REG - if non-null, temporary register. */
2425 static rtx
2426 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
2428 bool use_x_p;
2430 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2432 if (TARGET_COLDFIRE && use_x_p)
2433 /* When compiling with -mx{got, tls} switch the code will look like this:
2435 move.l <X>@<RELOC>,<TEMP_REG>
2436 add.l <BASE_REG>,<TEMP_REG> */
2438 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2439 to put @RELOC after reference. */
2440 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2441 UNSPEC_RELOC32);
2442 x = gen_rtx_CONST (Pmode, x);
2444 if (temp_reg == NULL)
2446 gcc_assert (can_create_pseudo_p ());
2447 temp_reg = gen_reg_rtx (Pmode);
2450 emit_move_insn (temp_reg, x);
2451 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
2452 x = temp_reg;
2454 else
2456 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2457 UNSPEC_RELOC16);
2458 x = gen_rtx_CONST (Pmode, x);
2460 x = gen_rtx_PLUS (Pmode, base_reg, x);
2463 return x;
2466 /* Helper for m68k_unwrap_symbol.
2467 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2468 sets *RELOC_PTR to relocation type for the symbol. */
2470 static rtx
2471 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2472 enum m68k_reloc *reloc_ptr)
2474 if (GET_CODE (orig) == CONST)
2476 rtx x;
2477 enum m68k_reloc dummy;
2479 x = XEXP (orig, 0);
2481 if (reloc_ptr == NULL)
2482 reloc_ptr = &dummy;
2484 /* Handle an addend. */
2485 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2486 && CONST_INT_P (XEXP (x, 1)))
2487 x = XEXP (x, 0);
2489 if (GET_CODE (x) == UNSPEC)
2491 switch (XINT (x, 1))
2493 case UNSPEC_RELOC16:
2494 orig = XVECEXP (x, 0, 0);
2495 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2496 break;
2498 case UNSPEC_RELOC32:
2499 if (unwrap_reloc32_p)
2501 orig = XVECEXP (x, 0, 0);
2502 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2504 break;
2506 default:
2507 break;
2512 return orig;
2515 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2516 UNSPEC_RELOC32 wrappers. */
2519 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2521 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2524 /* Adjust decorated address operand before outputing assembler for it. */
2526 static void
2527 m68k_adjust_decorated_operand (rtx op)
2529 /* Combine and, possibly, other optimizations may do good job
2530 converting
2531 (const (unspec [(symbol)]))
2532 into
2533 (const (plus (unspec [(symbol)])
2534 (const_int N))).
2535 The problem with this is emitting @TLS or @GOT decorations.
2536 The decoration is emitted when processing (unspec), so the
2537 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2539 It seems that the easiest solution to this is to convert such
2540 operands to
2541 (const (unspec [(plus (symbol)
2542 (const_int N))])).
2543 Note, that the top level of operand remains intact, so we don't have
2544 to patch up anything outside of the operand. */
2546 subrtx_var_iterator::array_type array;
2547 FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
2549 rtx x = *iter;
2550 if (m68k_unwrap_symbol (x, true) != x)
2552 rtx plus;
2554 gcc_assert (GET_CODE (x) == CONST);
2555 plus = XEXP (x, 0);
2557 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2559 rtx unspec;
2560 rtx addend;
2562 unspec = XEXP (plus, 0);
2563 gcc_assert (GET_CODE (unspec) == UNSPEC);
2564 addend = XEXP (plus, 1);
2565 gcc_assert (CONST_INT_P (addend));
2567 /* We now have all the pieces, rearrange them. */
2569 /* Move symbol to plus. */
2570 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2572 /* Move plus inside unspec. */
2573 XVECEXP (unspec, 0, 0) = plus;
2575 /* Move unspec to top level of const. */
2576 XEXP (x, 0) = unspec;
2578 iter.skip_subrtxes ();
2583 /* Prescan insn before outputing assembler for it. */
2585 void
2586 m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
2587 rtx *operands, int n_operands)
2589 int i;
2591 for (i = 0; i < n_operands; ++i)
2592 m68k_adjust_decorated_operand (operands[i]);
2595 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2596 If REG is non-null, use it; generate new pseudo otherwise. */
2598 static rtx
2599 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2601 rtx_insn *insn;
2603 if (reg == NULL_RTX)
2605 gcc_assert (can_create_pseudo_p ());
2606 reg = gen_reg_rtx (Pmode);
2609 insn = emit_move_insn (reg, x);
2610 /* Put a REG_EQUAL note on this insn, so that it can be optimized
2611 by loop. */
2612 set_unique_reg_note (insn, REG_EQUAL, orig);
2614 return reg;
2617 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
2618 GOT slot. */
2620 static rtx
2621 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2623 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2625 x = gen_rtx_MEM (Pmode, x);
2626 MEM_READONLY_P (x) = 1;
2628 return x;
2631 /* Legitimize PIC addresses. If the address is already
2632 position-independent, we return ORIG. Newly generated
2633 position-independent addresses go to REG. If we need more
2634 than one register, we lose.
2636 An address is legitimized by making an indirect reference
2637 through the Global Offset Table with the name of the symbol
2638 used as an offset.
2640 The assembler and linker are responsible for placing the
2641 address of the symbol in the GOT. The function prologue
2642 is responsible for initializing a5 to the starting address
2643 of the GOT.
2645 The assembler is also responsible for translating a symbol name
2646 into a constant displacement from the start of the GOT.
2648 A quick example may make things a little clearer:
2650 When not generating PIC code to store the value 12345 into _foo
2651 we would generate the following code:
2653 movel #12345, _foo
2655 When generating PIC two transformations are made. First, the compiler
2656 loads the address of foo into a register. So the first transformation makes:
2658 lea _foo, a0
2659 movel #12345, a0@
2661 The code in movsi will intercept the lea instruction and call this
2662 routine which will transform the instructions into:
2664 movel a5@(_foo:w), a0
2665 movel #12345, a0@
2668 That (in a nutshell) is how *all* symbol and label references are
2669 handled. */
2672 legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
2673 rtx reg)
2675 rtx pic_ref = orig;
2677 /* First handle a simple SYMBOL_REF or LABEL_REF */
2678 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
2680 gcc_assert (reg);
2682 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2683 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2685 else if (GET_CODE (orig) == CONST)
2687 rtx base;
2689 /* Make sure this has not already been legitimized. */
2690 if (m68k_unwrap_symbol (orig, true) != orig)
2691 return orig;
2693 gcc_assert (reg);
2695 /* legitimize both operands of the PLUS */
2696 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
2698 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2699 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2700 base == reg ? 0 : reg);
2702 if (GET_CODE (orig) == CONST_INT)
2703 pic_ref = plus_constant (Pmode, base, INTVAL (orig));
2704 else
2705 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2708 return pic_ref;
2711 /* The __tls_get_addr symbol. */
2712 static GTY(()) rtx m68k_tls_get_addr;
2714 /* Return SYMBOL_REF for __tls_get_addr. */
2716 static rtx
2717 m68k_get_tls_get_addr (void)
2719 if (m68k_tls_get_addr == NULL_RTX)
2720 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2722 return m68k_tls_get_addr;
2725 /* Return libcall result in A0 instead of usual D0. */
2726 static bool m68k_libcall_value_in_a0_p = false;
2728 /* Emit instruction sequence that calls __tls_get_addr. X is
2729 the TLS symbol we are referencing and RELOC is the symbol type to use
2730 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2731 emitted. A pseudo register with result of __tls_get_addr call is
2732 returned. */
2734 static rtx
2735 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2737 rtx a0;
2738 rtx_insn *insns;
2739 rtx dest;
2741 /* Emit the call sequence. */
2742 start_sequence ();
2744 /* FIXME: Unfortunately, emit_library_call_value does not
2745 consider (plus (%a5) (const (unspec))) to be a good enough
2746 operand for push, so it forces it into a register. The bad
2747 thing about this is that combiner, due to copy propagation and other
2748 optimizations, sometimes cannot later fix this. As a consequence,
2749 additional register may be allocated resulting in a spill.
2750 For reference, see args processing loops in
2751 calls.cc:emit_library_call_value_1.
2752 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
2753 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2755 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2756 is the simpliest way of generating a call. The difference between
2757 __tls_get_addr() and libcall is that the result is returned in D0
2758 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2759 which temporarily switches returning the result to A0. */
2761 m68k_libcall_value_in_a0_p = true;
2762 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2763 Pmode, x, Pmode);
2764 m68k_libcall_value_in_a0_p = false;
2766 insns = get_insns ();
2767 end_sequence ();
2769 gcc_assert (can_create_pseudo_p ());
2770 dest = gen_reg_rtx (Pmode);
2771 emit_libcall_block (insns, dest, a0, eqv);
2773 return dest;
2776 /* The __tls_get_addr symbol. */
2777 static GTY(()) rtx m68k_read_tp;
2779 /* Return SYMBOL_REF for __m68k_read_tp. */
2781 static rtx
2782 m68k_get_m68k_read_tp (void)
2784 if (m68k_read_tp == NULL_RTX)
2785 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2787 return m68k_read_tp;
2790 /* Emit instruction sequence that calls __m68k_read_tp.
2791 A pseudo register with result of __m68k_read_tp call is returned. */
2793 static rtx
2794 m68k_call_m68k_read_tp (void)
2796 rtx a0;
2797 rtx eqv;
2798 rtx_insn *insns;
2799 rtx dest;
2801 start_sequence ();
2803 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2804 is the simpliest way of generating a call. The difference between
2805 __m68k_read_tp() and libcall is that the result is returned in D0
2806 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2807 which temporarily switches returning the result to A0. */
2809 /* Emit the call sequence. */
2810 m68k_libcall_value_in_a0_p = true;
2811 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2812 Pmode);
2813 m68k_libcall_value_in_a0_p = false;
2814 insns = get_insns ();
2815 end_sequence ();
2817 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2818 share the m68k_read_tp result with other IE/LE model accesses. */
2819 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
2821 gcc_assert (can_create_pseudo_p ());
2822 dest = gen_reg_rtx (Pmode);
2823 emit_libcall_block (insns, dest, a0, eqv);
2825 return dest;
2828 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2829 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2830 ColdFire. */
2833 m68k_legitimize_tls_address (rtx orig)
2835 switch (SYMBOL_REF_TLS_MODEL (orig))
2837 case TLS_MODEL_GLOBAL_DYNAMIC:
2838 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2839 break;
2841 case TLS_MODEL_LOCAL_DYNAMIC:
2843 rtx eqv;
2844 rtx a0;
2845 rtx x;
2847 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2848 share the LDM result with other LD model accesses. */
2849 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2850 UNSPEC_RELOC32);
2852 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2854 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2856 if (can_create_pseudo_p ())
2857 x = m68k_move_to_reg (x, orig, NULL_RTX);
2859 orig = x;
2860 break;
2863 case TLS_MODEL_INITIAL_EXEC:
2865 rtx a0;
2866 rtx x;
2868 a0 = m68k_call_m68k_read_tp ();
2870 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2871 x = gen_rtx_PLUS (Pmode, x, a0);
2873 if (can_create_pseudo_p ())
2874 x = m68k_move_to_reg (x, orig, NULL_RTX);
2876 orig = x;
2877 break;
2880 case TLS_MODEL_LOCAL_EXEC:
2882 rtx a0;
2883 rtx x;
2885 a0 = m68k_call_m68k_read_tp ();
2887 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2889 if (can_create_pseudo_p ())
2890 x = m68k_move_to_reg (x, orig, NULL_RTX);
2892 orig = x;
2893 break;
2896 default:
2897 gcc_unreachable ();
2900 return orig;
2903 /* Return true if X is a TLS symbol. */
2905 static bool
2906 m68k_tls_symbol_p (rtx x)
2908 if (!TARGET_HAVE_TLS)
2909 return false;
2911 if (GET_CODE (x) != SYMBOL_REF)
2912 return false;
2914 return SYMBOL_REF_TLS_MODEL (x) != 0;
2917 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2918 though illegitimate one.
2919 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference. */
2921 bool
2922 m68k_tls_reference_p (rtx x, bool legitimate_p)
2924 if (!TARGET_HAVE_TLS)
2925 return false;
2927 if (!legitimate_p)
2929 subrtx_var_iterator::array_type array;
2930 FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
2932 rtx x = *iter;
2934 /* Note: this is not the same as m68k_tls_symbol_p. */
2935 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
2936 return true;
2938 /* Don't recurse into legitimate TLS references. */
2939 if (m68k_tls_reference_p (x, true))
2940 iter.skip_subrtxes ();
2942 return false;
2944 else
2946 enum m68k_reloc reloc = RELOC_GOT;
2948 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2949 && TLS_RELOC_P (reloc));
2955 #define USE_MOVQ(i) ((unsigned) ((i) + 128) <= 255)
2957 /* Return the type of move that should be used for integer I. */
2959 M68K_CONST_METHOD
2960 m68k_const_method (HOST_WIDE_INT i)
2962 unsigned u;
2964 if (USE_MOVQ (i))
2965 return MOVQ;
2967 /* The ColdFire doesn't have byte or word operations. */
2968 /* FIXME: This may not be useful for the m68060 either. */
2969 if (!TARGET_COLDFIRE)
2971 /* if -256 < N < 256 but N is not in range for a moveq
2972 N^ff will be, so use moveq #N^ff, dreg; not.b dreg. */
2973 if (USE_MOVQ (i ^ 0xff))
2974 return NOTB;
2975 /* Likewise, try with not.w */
2976 if (USE_MOVQ (i ^ 0xffff))
2977 return NOTW;
2978 /* This is the only value where neg.w is useful */
2979 if (i == -65408)
2980 return NEGW;
2983 /* Try also with swap. */
2984 u = i;
2985 if (USE_MOVQ ((u >> 16) | (u << 16)))
2986 return SWAP;
2988 if (TARGET_ISAB)
2990 /* Try using MVZ/MVS with an immediate value to load constants. */
2991 if (i >= 0 && i <= 65535)
2992 return MVZ;
2993 if (i >= -32768 && i <= 32767)
2994 return MVS;
2997 /* Otherwise, use move.l */
2998 return MOVL;
3001 /* Return the cost of moving constant I into a data register. */
3003 static int
3004 const_int_cost (HOST_WIDE_INT i)
3006 switch (m68k_const_method (i))
3008 case MOVQ:
3009 /* Constants between -128 and 127 are cheap due to moveq. */
3010 return 0;
3011 case MVZ:
3012 case MVS:
3013 case NOTB:
3014 case NOTW:
3015 case NEGW:
3016 case SWAP:
3017 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap. */
3018 return 1;
3019 case MOVL:
3020 return 2;
3021 default:
3022 gcc_unreachable ();
3026 static bool
3027 m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
3028 int opno ATTRIBUTE_UNUSED,
3029 int *total, bool speed ATTRIBUTE_UNUSED)
3031 int code = GET_CODE (x);
3033 switch (code)
3035 case CONST_INT:
3036 /* Constant zero is super cheap due to clr instruction. */
3037 if (x == const0_rtx)
3038 *total = 0;
3039 else
3040 *total = const_int_cost (INTVAL (x));
3041 return true;
3043 case CONST:
3044 case LABEL_REF:
3045 case SYMBOL_REF:
3046 *total = 3;
3047 return true;
3049 case CONST_DOUBLE:
3050 /* Make 0.0 cheaper than other floating constants to
3051 encourage creating tstsf and tstdf insns. */
3052 if ((GET_RTX_CLASS (outer_code) == RTX_COMPARE
3053 || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
3054 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
3055 *total = 4;
3056 else
3057 *total = 5;
3058 return true;
3060 /* These are vaguely right for a 68020. */
3061 /* The costs for long multiply have been adjusted to work properly
3062 in synth_mult on the 68020, relative to an average of the time
3063 for add and the time for shift, taking away a little more because
3064 sometimes move insns are needed. */
3065 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
3066 terms. */
3067 #define MULL_COST \
3068 (TUNE_68060 ? 2 \
3069 : TUNE_68040 ? 5 \
3070 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
3071 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
3072 : TUNE_CFV2 ? 8 \
3073 : TARGET_COLDFIRE ? 3 : 13)
3075 #define MULW_COST \
3076 (TUNE_68060 ? 2 \
3077 : TUNE_68040 ? 3 \
3078 : TUNE_68000_10 ? 5 \
3079 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
3080 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
3081 : TUNE_CFV2 ? 8 \
3082 : TARGET_COLDFIRE ? 2 : 8)
3084 #define DIVW_COST \
3085 (TARGET_CF_HWDIV ? 11 \
3086 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
3088 case PLUS:
3089 /* An lea costs about three times as much as a simple add. */
3090 if (mode == SImode
3091 && GET_CODE (XEXP (x, 1)) == REG
3092 && ((GET_CODE (XEXP (x, 0)) == MULT
3093 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
3094 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3095 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
3096 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
3097 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
3098 || (GET_CODE (XEXP (x, 0)) == ASHIFT
3099 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
3100 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
3101 && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1))
3102 <= 3))))
3104 /* lea an@(dx:l:i),am */
3105 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
3106 return true;
3108 return false;
3110 case ASHIFT:
3111 case ASHIFTRT:
3112 case LSHIFTRT:
3113 if (TUNE_68060)
3115 *total = COSTS_N_INSNS(1);
3116 return true;
3118 if (TUNE_68000_10)
3120 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
3122 if (INTVAL (XEXP (x, 1)) < 16)
3123 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
3124 else
3125 /* We're using clrw + swap for these cases. */
3126 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
3128 else
3129 *total = COSTS_N_INSNS (10); /* Worst case. */
3130 return true;
3132 /* A shift by a big integer takes an extra instruction. */
3133 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3134 && (INTVAL (XEXP (x, 1)) == 16))
3136 *total = COSTS_N_INSNS (2); /* clrw;swap */
3137 return true;
3139 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3140 && !(INTVAL (XEXP (x, 1)) > 0
3141 && INTVAL (XEXP (x, 1)) <= 8))
3143 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3); /* lsr #i,dn */
3144 return true;
3146 return false;
3148 case MULT:
3149 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
3150 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
3151 && mode == SImode)
3152 *total = COSTS_N_INSNS (MULW_COST);
3153 else if (mode == QImode || mode == HImode)
3154 *total = COSTS_N_INSNS (MULW_COST);
3155 else
3156 *total = COSTS_N_INSNS (MULL_COST);
3157 return true;
3159 case DIV:
3160 case UDIV:
3161 case MOD:
3162 case UMOD:
3163 if (mode == QImode || mode == HImode)
3164 *total = COSTS_N_INSNS (DIVW_COST); /* div.w */
3165 else if (TARGET_CF_HWDIV)
3166 *total = COSTS_N_INSNS (18);
3167 else
3168 *total = COSTS_N_INSNS (43); /* div.l */
3169 return true;
3171 case ZERO_EXTRACT:
3172 if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
3173 || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
3174 *total = 0;
3175 return false;
3177 default:
3178 return false;
3182 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
3183 OPERANDS[0]. */
3185 static const char *
3186 output_move_const_into_data_reg (rtx *operands)
3188 HOST_WIDE_INT i;
3190 i = INTVAL (operands[1]);
3191 switch (m68k_const_method (i))
3193 case MVZ:
3194 return "mvzw %1,%0";
3195 case MVS:
3196 return "mvsw %1,%0";
3197 case MOVQ:
3198 return "moveq %1,%0";
3199 case NOTB:
3200 CC_STATUS_INIT;
3201 operands[1] = GEN_INT (i ^ 0xff);
3202 return "moveq %1,%0\n\tnot%.b %0";
3203 case NOTW:
3204 CC_STATUS_INIT;
3205 operands[1] = GEN_INT (i ^ 0xffff);
3206 return "moveq %1,%0\n\tnot%.w %0";
3207 case NEGW:
3208 CC_STATUS_INIT;
3209 return "moveq #-128,%0\n\tneg%.w %0";
3210 case SWAP:
3212 unsigned u = i;
3214 operands[1] = GEN_INT ((u << 16) | (u >> 16));
3215 return "moveq %1,%0\n\tswap %0";
3217 case MOVL:
3218 return "move%.l %1,%0";
3219 default:
3220 gcc_unreachable ();
3224 /* Return true if I can be handled by ISA B's mov3q instruction. */
3226 bool
3227 valid_mov3q_const (HOST_WIDE_INT i)
3229 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
3232 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
3233 I is the value of OPERANDS[1]. */
3235 static const char *
3236 output_move_simode_const (rtx *operands)
3238 rtx dest;
3239 HOST_WIDE_INT src;
3241 dest = operands[0];
3242 src = INTVAL (operands[1]);
3243 if (src == 0
3244 && (DATA_REG_P (dest) || MEM_P (dest))
3245 /* clr insns on 68000 read before writing. */
3246 && ((TARGET_68010 || TARGET_COLDFIRE)
3247 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
3248 return "clr%.l %0";
3249 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
3250 return "mov3q%.l %1,%0";
3251 else if (src == 0 && ADDRESS_REG_P (dest))
3252 return "sub%.l %0,%0";
3253 else if (DATA_REG_P (dest))
3254 return output_move_const_into_data_reg (operands);
3255 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
3257 if (valid_mov3q_const (src))
3258 return "mov3q%.l %1,%0";
3259 return "move%.w %1,%0";
3261 else if (MEM_P (dest)
3262 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
3263 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
3264 && IN_RANGE (src, -0x8000, 0x7fff))
3266 if (valid_mov3q_const (src))
3267 return "mov3q%.l %1,%-";
3268 return "pea %a1";
3270 return "move%.l %1,%0";
3273 const char *
3274 output_move_simode (rtx *operands)
3276 handle_flags_for_move (operands);
3278 if (GET_CODE (operands[1]) == CONST_INT)
3279 return output_move_simode_const (operands);
3280 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3281 || GET_CODE (operands[1]) == CONST)
3282 && push_operand (operands[0], SImode))
3283 return "pea %a1";
3284 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3285 || GET_CODE (operands[1]) == CONST)
3286 && ADDRESS_REG_P (operands[0]))
3287 return "lea %a1,%0";
3288 return "move%.l %1,%0";
/* Return the assembler template for moving operands[1] into operands[0]
   as a halfword (HImode), picking the cheapest encoding for small
   integer constants (clr, sub, moveq, move%.w).  */
const char *
output_move_himode (rtx *operands)
{
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      if (operands[1] == const0_rtx
	  && (DATA_REG_P (operands[0])
	      || GET_CODE (operands[0]) == MEM)
	  /* clr insns on 68000 read before writing.  */
	  && ((TARGET_68010 || TARGET_COLDFIRE)
	      || !(GET_CODE (operands[0]) == MEM
		   && MEM_VOLATILE_P (operands[0]))))
	return "clr%.w %0";
      else if (operands[1] == const0_rtx
	       && ADDRESS_REG_P (operands[0]))
	/* Address registers have no clr; subtracting a register from
	   itself zeroes it.  */
	return "sub%.l %0,%0";
      else if (DATA_REG_P (operands[0])
	       && INTVAL (operands[1]) < 128
	       && INTVAL (operands[1]) >= -128)
	/* moveq handles signed 8-bit immediates into data registers.  */
	return "moveq %1,%0";
      else if (INTVAL (operands[1]) < 0x8000
	       && INTVAL (operands[1]) >= -0x8000)
	return "move%.w %1,%0";
    }
  else if (CONSTANT_P (operands[1]))
    /* Non-CONST_INT constants are not valid HImode sources here.  */
    gcc_unreachable ();
  return "move%.w %1,%0";
}
/* Return the assembler template for moving operands[1] into operands[0]
   as a byte (QImode).  Handles clr/st/moveq shortcuts and the m68k
   restriction that byte moves cannot involve address registers.  */
const char *
output_move_qimode (rtx *operands)
{
  /* Record what this move does to the condition-code flags.  */
  handle_flags_for_move (operands);

  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all bits, i.e. stores 0xff; only usable when the low
	 byte of the constant is all-ones.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    gcc_unreachable ();
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    {
      if (ADDRESS_REG_P (operands[1]))
	CC_STATUS_INIT;
      /* Fall back to a word move, which address registers do support.  */
      return "move%.w %1,%0";
    }
  return "move%.b %1,%0";
}
3370 const char *
3371 output_move_stricthi (rtx *operands)
3373 if (operands[1] == const0_rtx
3374 /* clr insns on 68000 read before writing. */
3375 && ((TARGET_68010 || TARGET_COLDFIRE)
3376 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3377 return "clr%.w %0";
3378 return "move%.w %1,%0";
3381 const char *
3382 output_move_strictqi (rtx *operands)
3384 if (operands[1] == const0_rtx
3385 /* clr insns on 68000 read before writing. */
3386 && ((TARGET_68010 || TARGET_COLDFIRE)
3387 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3388 return "clr%.b %0";
3389 return "move%.b %1,%0";
3392 /* Return the best assembler insn template
3393 for moving operands[1] into operands[0] as a fullword. */
3395 static const char *
3396 singlemove_string (rtx *operands)
3398 if (GET_CODE (operands[1]) == CONST_INT)
3399 return output_move_simode_const (operands);
3400 return "move%.l %1,%0";
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   The move is decomposed into 2 (DImode/DFmode, size 8) or 3 (XFmode,
   size 12) SImode piece moves, ordered so that no piece clobbers a
   register or autoincrement/decrement address that a later piece still
   needs.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  /* Classification of each operand by addressing kind.  */
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];		/* Highest-addressed SImode piece.  */
  rtx middlehalf[2];		/* Middle piece, only when size == 12.  */
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;	/* Address regs to bump for MEMOP operands.  */
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable or autoinc/dec: reuse the same address; the
	     address register is adjusted between the piece moves.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      long l[3];

	      REAL_VALUE_TO_TARGET_LONG_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise,  the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
3742 /* Output assembler code to adjust REG by N. */
3743 static void
3744 output_reg_adjust (rtx reg, int n)
3746 const char *s;
3748 gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);
3750 switch (n)
3752 case 12:
3753 s = "add%.l #12,%0";
3754 break;
3756 case 8:
3757 s = "addq%.l #8,%0";
3758 break;
3760 case 4:
3761 s = "addq%.l #4,%0";
3762 break;
3764 case -12:
3765 s = "sub%.l #12,%0";
3766 break;
3768 case -8:
3769 s = "subq%.l #8,%0";
3770 break;
3772 case -4:
3773 s = "subq%.l #4,%0";
3774 break;
3776 default:
3777 gcc_unreachable ();
3778 s = NULL;
3781 output_asm_insn (s, &reg);
3784 /* Emit rtl code to adjust REG by N. */
3785 static void
3786 emit_reg_adjust (rtx reg1, int n)
3788 rtx reg2;
3790 gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);
3792 reg1 = copy_rtx (reg1);
3793 reg2 = copy_rtx (reg1);
3795 if (n < 0)
3796 emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
3797 else if (n > 0)
3798 emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
3799 else
3800 gcc_unreachable ();
/* Output assembler code to load the address OPERANDS[1] into register
   OPERANDS[0].  (The previous comment had the operands reversed; the
   template "lea %a1,%0" loads the effective address of operand 1 into
   operand 0.)  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Thin assembly-emitting wrapper over
   singlemove_string, used as the HANDLE_MOVSI callback of
   handle_move_double.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
3818 /* Copy OP and change its mode to MODE. */
3819 static rtx
3820 copy_operand (rtx op, machine_mode mode)
3822 /* ??? This looks really ugly. There must be a better way
3823 to change a mode on the operand. */
3824 if (GET_MODE (op) != VOIDmode)
3826 if (REG_P (op))
3827 op = gen_rtx_REG (mode, REGNO (op));
3828 else
3830 op = copy_rtx (op);
3831 PUT_MODE (op, mode);
3835 return op;
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.
   Both operands are re-cast to SImode copies first so the generated
   movsi insn does not share rtl with the caller.  */
static void
emit_movsi (rtx operands[2])
{
  operands[0] = copy_operand (operands[0], SImode);
  operands[1] = copy_operand (operands[1], SImode);

  emit_insn (gen_movsi (operands[0], operands[1]));
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Returns "" because handle_move_double
   emits all the assembly itself via the output_* callbacks.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  RTL-emitting counterpart of
   output_move_double; note the HANDLE_COMPADR callback is emit_movsi,
   since computing an address in rtl is just a SImode move.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
3867 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3868 new rtx with the correct mode. */
3870 static rtx
3871 force_mode (machine_mode mode, rtx orig)
3873 if (mode == GET_MODE (orig))
3874 return orig;
3876 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3877 abort ();
3879 return gen_rtx_REG (mode, REGNO (orig));
/* Return nonzero if OP is a floating-point register.  The reg_renumber
   guard means this only answers true once register allocation has set
   up renumbering — NOTE(review): presumably so the secondary-reload
   logic below only fires post-allocation; MODE is unused.  */
static int
fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.

   NOTE(review): this routine is modelled on the pa (HP-PA) backend's
   emit_move_sequence — the "14 bits" comments below are inherited from
   there and do not describe an m68k displacement limit; verify against
   history before relying on them.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  rtx operand0 = operands[0];
  rtx operand1 = operands[1];
  rtx tem;

  /* During reload, replace not-yet-allocated pseudos with their
     equivalent stack slots.  */
  if (scratch_reg
      && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && (reload_in_progress || lra_in_progress)
	   && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && (reload_in_progress || lra_in_progress)
	   && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Substitute any address replacements recorded by reload.  */
  if (scratch_reg && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      /* Load through the scratch address register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      /* Store through the scratch address register.  */
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
4055 /* Split one or more DImode RTL references into pairs of SImode
4056 references. The RTL can be REG, offsettable MEM, integer constant, or
4057 CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
4058 split and "num" is its length. lo_half and hi_half are output arrays
4059 that parallel "operands". */
4061 void
4062 split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
4064 while (num--)
4066 rtx op = operands[num];
4068 /* simplify_subreg refuses to split volatile memory addresses,
4069 but we still have to handle it. */
4070 if (GET_CODE (op) == MEM)
4072 lo_half[num] = adjust_address (op, SImode, 4);
4073 hi_half[num] = adjust_address (op, SImode, 0);
4075 else
4077 lo_half[num] = simplify_gen_subreg (SImode, op,
4078 GET_MODE (op) == VOIDmode
4079 ? DImode : GET_MODE (op), 4);
4080 hi_half[num] = simplify_gen_subreg (SImode, op,
4081 GET_MODE (op) == VOIDmode
4082 ? DImode : GET_MODE (op), 0);
4087 /* Split X into a base and a constant offset, storing them in *BASE
4088 and *OFFSET respectively. */
4090 static void
4091 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
4093 *offset = 0;
4094 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
4096 *offset += INTVAL (XEXP (x, 1));
4097 x = XEXP (x, 0);
4099 *base = x;
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  /* Element 0 is the base-register adjustment when automodifying.  */
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  /* Each subsequent access must be contiguous with the last:
	     same base, expected running offset.  */
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  /* Element 0 is the base-register adjustment when automodifying.  */
  first = (automod_offset != 0);
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* The flags are left in an undefined state.  */
  CC_STATUS_INIT;

  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4285 /* Return a REG that occurs in ADDR with coefficient 1.
4286 ADDR can be effectively incremented by incrementing REG. */
4288 static rtx
4289 find_addr_reg (rtx addr)
4291 while (GET_CODE (addr) == PLUS)
4293 if (GET_CODE (XEXP (addr, 0)) == REG)
4294 addr = XEXP (addr, 0);
4295 else if (GET_CODE (XEXP (addr, 1)) == REG)
4296 addr = XEXP (addr, 1);
4297 else if (CONSTANT_P (XEXP (addr, 0)))
4298 addr = XEXP (addr, 1);
4299 else if (CONSTANT_P (XEXP (addr, 1)))
4300 addr = XEXP (addr, 0);
4301 else
4302 gcc_unreachable ();
4304 gcc_assert (GET_CODE (addr) == REG);
4305 return addr;
/* Output assembler code to perform a 32-bit 3-operand add.
   Chooses among lea (when the destination differs from the first
   source), addq/subq for small immediates, paired addq/subq on CPU32,
   add%.w on address registers, and plain add%.l.  May modify
   operands[1]/operands[2] in place before returning a template.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* Three distinct operands: only lea can do this in one insn, and
	 lea needs an address register as the base, so put it first.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates 1..8.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  /* add%.w to an address register sign-extends and adds the
	     full 32 bits, so a 16-bit immediate suffices; the 68040
	     prefers it over lea.  */
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
/* Emit a comparison between OP0 and OP1.  Return true iff the comparison
   was reversed.  SC1 is an SImode scratch reg, and SC2 a DImode scratch reg,
   as needed.  CODE is the code of the comparison, we return it unchanged or
   swapped, as necessary.

   %R0 in the templates below names the low (second) word of a DImode
   operand; %0 names the high word.  */
rtx_code
m68k_output_compare_di (rtx op0, rtx op1, rtx sc1, rtx sc2, rtx_insn *insn,
			rtx_code code)
{
  rtx ops[4];
  ops[0] = op0;
  ops[1] = op1;
  ops[2] = sc1;
  ops[3] = sc2;
  if (op1 == const0_rtx)
    {
      if (!REG_P (op0) || ADDRESS_REG_P (op0))
	{
	  /* Copy OP0 into the DImode scratch so we can negate it
	     destructively; 0 - x sets the flags for x vs 0 reversed.  */
	  rtx xoperands[2];

	  xoperands[0] = sc2;
	  xoperands[1] = op0;
	  output_move_double (xoperands);
	  output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", xoperands);
	  return swap_condition (code);
	}
      if (find_reg_note (insn, REG_DEAD, op0))
	{
	  /* OP0 dies here, so we may clobber it in place.  */
	  output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", ops);
	  return swap_condition (code);
	}
      else
	{
	  /* 'sub' clears %2, and also clears the X cc bit.
	     'tst' sets the Z cc bit according to the low part of the DImode
	     operand.
	     'subx %2' (i.e. subx #0) acts as a (non-existent) tstx on the high
	     part.  */
	  output_asm_insn ("sub%.l %2,%2\n\ttst%.l %R0\n\tsubx%.l %2,%0", ops);
	  return code;
	}
    }

  if (rtx_equal_p (sc2, op0))
    {
      /* OP0 already lives in the DImode scratch: compute OP0 - OP1.  */
      output_asm_insn ("sub%.l %R1,%R3\n\tsubx%.l %1,%3", ops);
      return code;
    }
  else
    {
      /* Compute OP1 - OP0 in the scratch, so the condition swaps.  */
      output_asm_insn ("sub%.l %R0,%R3\n\tsubx%.l %0,%3", ops);
      return swap_condition (code);
    }
}
4428 static void
4429 remember_compare_flags (rtx op0, rtx op1)
4431 if (side_effects_p (op0) || side_effects_p (op1))
4432 CC_STATUS_INIT;
4433 else
4435 flags_compare_op0 = op0;
4436 flags_compare_op1 = op1;
4437 flags_operand1 = flags_operand2 = NULL_RTX;
4438 flags_valid = FLAGS_VALID_SET;
4442 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4443 comparison. It is returned, potentially modified if necessary. */
4444 rtx_code
4445 m68k_output_compare_si (rtx op0, rtx op1, rtx_code code)
/* If the current flags already encode this comparison, emit nothing.  */
4447 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4448 if (tmp != UNKNOWN)
4449 return tmp;
4451 remember_compare_flags (op0, op1);
4453 rtx ops[2];
4454 ops[0] = op0;
4455 ops[1] = op1;
/* tst of an address register is only available on 68020+/ColdFire.  */
4456 if (op1 == const0_rtx && (TARGET_68020 || TARGET_COLDFIRE || !ADDRESS_REG_P (op0)))
4457 output_asm_insn ("tst%.l %0", ops);
4458 else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
4459 output_asm_insn ("cmpm%.l %1,%0", ops);
4460 else if (REG_P (op1)
4461 || (!REG_P (op0) && GET_CODE (op0) != MEM))
/* Operands are the wrong way round for cmp: compare in the opposite
   order and report the swapped condition (and record the swap in the
   flags-tracking globals).  */
4463 output_asm_insn ("cmp%.l %d0,%d1", ops);
4464 std::swap (flags_compare_op0, flags_compare_op1);
4465 return swap_condition (code);
/* A 16-bit immediate against an address reg can use the shorter cmp.w.  */
4467 else if (!TARGET_COLDFIRE
4468 && ADDRESS_REG_P (op0)
4469 && GET_CODE (op1) == CONST_INT
4470 && INTVAL (op1) < 0x8000
4471 && INTVAL (op1) >= -0x8000)
4472 output_asm_insn ("cmp%.w %1,%0", ops);
4473 else
4474 output_asm_insn ("cmp%.l %d1,%d0", ops);
4475 return code;
4478 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4479 comparison. It is returned, potentially modified if necessary. */
4480 rtx_code
4481 m68k_output_compare_hi (rtx op0, rtx op1, rtx_code code)
/* Reuse already-valid flags when possible.  */
4483 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4484 if (tmp != UNKNOWN)
4485 return tmp;
4487 remember_compare_flags (op0, op1);
4489 rtx ops[2];
4490 ops[0] = op0;
4491 ops[1] = op1;
4492 if (op1 == const0_rtx)
4493 output_asm_insn ("tst%.w %d0", ops);
4494 else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
4495 output_asm_insn ("cmpm%.w %1,%0", ops);
4496 else if ((REG_P (op1) && !ADDRESS_REG_P (op1))
4497 || (!REG_P (op0) && GET_CODE (op0) != MEM))
/* Operands are the wrong way round for cmp.w: compare reversed and
   return the swapped condition.  */
4499 output_asm_insn ("cmp%.w %d0,%d1", ops);
4500 std::swap (flags_compare_op0, flags_compare_op1);
4501 return swap_condition (code);
4503 else
4504 output_asm_insn ("cmp%.w %d1,%d0", ops);
4505 return code;
4508 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4509 comparison. It is returned, potentially modified if necessary. */
4510 rtx_code
4511 m68k_output_compare_qi (rtx op0, rtx op1, rtx_code code)
/* Reuse already-valid flags when possible.  */
4513 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4514 if (tmp != UNKNOWN)
4515 return tmp;
4517 remember_compare_flags (op0, op1);
4519 rtx ops[2];
4520 ops[0] = op0;
4521 ops[1] = op1;
4522 if (op1 == const0_rtx)
4523 output_asm_insn ("tst%.b %d0", ops);
4524 else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
4525 output_asm_insn ("cmpm%.b %1,%0", ops);
4526 else if (REG_P (op1) || (!REG_P (op0) && GET_CODE (op0) != MEM))
/* Operands are the wrong way round for cmp.b: compare reversed and
   return the swapped condition.  */
4528 output_asm_insn ("cmp%.b %d0,%d1", ops);
4529 std::swap (flags_compare_op0, flags_compare_op1);
4530 return swap_condition (code);
4532 else
4533 output_asm_insn ("cmp%.b %d1,%d0", ops);
4534 return code;
4537 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4538 comparison. It is returned, potentially modified if necessary. */
4539 rtx_code
4540 m68k_output_compare_fp (rtx op0, rtx op1, rtx_code code)
4542 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4543 if (tmp != UNKNOWN)
4544 return tmp;
4546 rtx ops[2];
4547 ops[0] = op0;
4548 ops[1] = op1;
4550 remember_compare_flags (op0, op1);
4552 machine_mode mode = GET_MODE (op0);
/* Size suffix ("s"/"d"/"x") for the memory-operand forms of fcmp/ftst.  */
4553 std::string prec = mode == SFmode ? "s" : mode == DFmode ? "d" : "x";
/* Comparison against zero uses ftst.  */
4555 if (op1 == CONST0_RTX (GET_MODE (op0)))
4557 if (FP_REG_P (op0))
4559 if (TARGET_COLDFIRE_FPU)
4560 output_asm_insn ("ftst%.d %0", ops);
4561 else
4562 output_asm_insn ("ftst%.x %0", ops);
4564 else
4565 output_asm_insn (("ftst%." + prec + " %0").c_str (), ops);
4566 return code;
/* NOTE(review): relies on which_alternative of the insn currently being
   output; alternative 3 is the ftst form already handled above.  */
4569 switch (which_alternative)
4571 case 0:
4572 if (TARGET_COLDFIRE_FPU)
4573 output_asm_insn ("fcmp%.d %1,%0", ops);
4574 else
4575 output_asm_insn ("fcmp%.x %1,%0", ops);
4576 break;
4577 case 1:
4578 output_asm_insn (("fcmp%." + prec + " %f1,%0").c_str (), ops);
4579 break;
/* Alternative 2 compares in the opposite order; swap the condition.  */
4580 case 2:
4581 output_asm_insn (("fcmp%." + prec + " %0,%f1").c_str (), ops);
4582 std::swap (flags_compare_op0, flags_compare_op1);
4583 return swap_condition (code);
4584 case 3:
4585 /* This is the ftst case, handled earlier. */
4586 gcc_unreachable ();
4588 return code;
4591 /* Return an output template for a branch with CODE. */
4592 const char *
4593 m68k_output_branch_integer (rtx_code code)
4595 switch (code)
4597 case EQ:
4598 return "jeq %l3";
4599 case NE:
4600 return "jne %l3";
4601 case GT:
4602 return "jgt %l3";
4603 case GTU:
4604 return "jhi %l3";
4605 case LT:
4606 return "jlt %l3";
4607 case LTU:
4608 return "jcs %l3";
4609 case GE:
4610 return "jge %l3";
4611 case GEU:
4612 return "jcc %l3";
4613 case LE:
4614 return "jle %l3";
4615 case LEU:
4616 return "jls %l3";
4617 case PLUS:
4618 return "jpl %l3";
4619 case MINUS:
4620 return "jmi %l3";
4621 default:
4622 gcc_unreachable ();
4626 /* Return an output template for a reversed branch with CODE. */
4627 const char *
4628 m68k_output_branch_integer_rev (rtx_code code)
4630 switch (code)
4632 case EQ:
4633 return "jne %l3";
4634 case NE:
4635 return "jeq %l3";
4636 case GT:
4637 return "jle %l3";
4638 case GTU:
4639 return "jls %l3";
4640 case LT:
4641 return "jge %l3";
4642 case LTU:
4643 return "jcc %l3";
4644 case GE:
4645 return "jlt %l3";
4646 case GEU:
4647 return "jcs %l3";
4648 case LE:
4649 return "jgt %l3";
4650 case LEU:
4651 return "jhi %l3";
4652 case PLUS:
4653 return "jmi %l3";
4654 case MINUS:
4655 return "jpl %l3";
4656 default:
4657 gcc_unreachable ();
4661 /* Return an output template for a scc instruction with CODE. */
4662 const char *
4663 m68k_output_scc (rtx_code code)
4665 switch (code)
4667 case EQ:
4668 return "seq %0";
4669 case NE:
4670 return "sne %0";
4671 case GT:
4672 return "sgt %0";
4673 case GTU:
4674 return "shi %0";
4675 case LT:
4676 return "slt %0";
4677 case LTU:
4678 return "scs %0";
4679 case GE:
4680 return "sge %0";
4681 case GEU:
4682 return "scc %0";
4683 case LE:
4684 return "sle %0";
4685 case LEU:
4686 return "sls %0";
4687 case PLUS:
4688 return "spl %0";
4689 case MINUS:
4690 return "smi %0";
4691 default:
4692 gcc_unreachable ();
4696 /* Return an output template for a floating point branch
4697 instruction with CODE. */
4698 const char *
4699 m68k_output_branch_float (rtx_code code)
4701 switch (code)
4703 case EQ:
4704 return "fjeq %l3";
4705 case NE:
4706 return "fjne %l3";
4707 case GT:
4708 return "fjgt %l3";
4709 case LT:
4710 return "fjlt %l3";
4711 case GE:
4712 return "fjge %l3";
4713 case LE:
4714 return "fjle %l3";
4715 case ORDERED:
4716 return "fjor %l3";
4717 case UNORDERED:
4718 return "fjun %l3";
4719 case UNEQ:
4720 return "fjueq %l3";
4721 case UNGE:
4722 return "fjuge %l3";
4723 case UNGT:
4724 return "fjugt %l3";
4725 case UNLE:
4726 return "fjule %l3";
4727 case UNLT:
4728 return "fjult %l3";
4729 case LTGT:
4730 return "fjogl %l3";
4731 default:
4732 gcc_unreachable ();
4736 /* Return an output template for a reversed floating point branch
4737 instruction with CODE. */
4738 const char *
4739 m68k_output_branch_float_rev (rtx_code code)
4741 switch (code)
4743 case EQ:
4744 return "fjne %l3";
4745 case NE:
4746 return "fjeq %l3";
4747 case GT:
4748 return "fjngt %l3";
4749 case LT:
4750 return "fjnlt %l3";
4751 case GE:
4752 return "fjnge %l3";
4753 case LE:
4754 return "fjnle %l3";
4755 case ORDERED:
4756 return "fjun %l3";
4757 case UNORDERED:
4758 return "fjor %l3";
4759 case UNEQ:
4760 return "fjogl %l3";
4761 case UNGE:
4762 return "fjolt %l3";
4763 case UNGT:
4764 return "fjole %l3";
4765 case UNLE:
4766 return "fjogt %l3";
4767 case UNLT:
4768 return "fjoge %l3";
4769 case LTGT:
4770 return "fjueq %l3";
4771 default:
4772 gcc_unreachable ();
4776 /* Return an output template for a floating point scc
4777 instruction with CODE. */
4778 const char *
4779 m68k_output_scc_float (rtx_code code)
4781 switch (code)
4783 case EQ:
4784 return "fseq %0";
4785 case NE:
4786 return "fsne %0";
4787 case GT:
4788 return "fsgt %0";
4789 case GTU:
4790 return "fshi %0";
4791 case LT:
4792 return "fslt %0";
4793 case GE:
4794 return "fsge %0";
4795 case LE:
4796 return "fsle %0";
4797 case ORDERED:
4798 return "fsor %0";
4799 case UNORDERED:
4800 return "fsun %0";
4801 case UNEQ:
4802 return "fsueq %0";
4803 case UNGE:
4804 return "fsuge %0";
4805 case UNGT:
4806 return "fsugt %0";
4807 case UNLE:
4808 return "fsule %0";
4809 case UNLT:
4810 return "fsult %0";
4811 case LTGT:
4812 return "fsogl %0";
4813 default:
4814 gcc_unreachable ();
4818 const char *
4819 output_move_const_double (rtx *operands)
4821 int code = standard_68881_constant_p (operands[1]);
4823 if (code != 0)
4825 static char buf[40];
4827 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4828 return buf;
4830 return "fmove%.d %1,%0";
4833 const char *
4834 output_move_const_single (rtx *operands)
4836 int code = standard_68881_constant_p (operands[1]);
4838 if (code != 0)
4840 static char buf[40];
4842 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4843 return buf;
4845 return "fmove%.s %f1,%0";
4848 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4849 from the "fmovecr" instruction.
4850 The value, anded with 0xff, gives the code to use in fmovecr
4851 to get the desired constant. */
4853 /* This code has been fixed for cross-compilation. */
/* Nonzero once values_68881 has been filled in by init_68881_table.  */
4855 static int inited_68881_table = 0;
/* Decimal spellings of the constants held in the 68881 on-chip
   constant ROM, in table order.  */
4857 static const char *const strings_68881[7] = {
4858 "0.0",
4859 "1.0",
4860 "10.0",
4861 "100.0",
4862 "10000.0",
4863 "1e8",
4864 "1e16"
/* fmovecr ROM offsets corresponding to strings_68881, same order.  */
4867 static const int codes_68881[7] = {
4868 0x0f,
4869 0x32,
4870 0x33,
4871 0x34,
4872 0x35,
4873 0x36,
4874 0x37
/* Binary REAL_VALUE_TYPE forms of strings_68881; see init_68881_table.  */
4877 REAL_VALUE_TYPE values_68881[7];
4879 /* Set up values_68881 array by converting the decimal values
4880 strings_68881 to binary. */
4882 void
4883 init_68881_table (void)
4885 int i;
4886 REAL_VALUE_TYPE r;
4887 machine_mode mode;
4889 mode = SFmode;
4890 for (i = 0; i < 7; i++)
4892 if (i == 6)
4893 mode = DFmode;
4894 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4895 values_68881[i] = r;
4897 inited_68881_table = 1;
4901 standard_68881_constant_p (rtx x)
4903 const REAL_VALUE_TYPE *r;
4904 int i;
4906 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4907 used at all on those chips. */
4908 if (TUNE_68040_60)
4909 return 0;
/* Lazily build the binary constant table on first use.  */
4911 if (! inited_68881_table)
4912 init_68881_table ();
4914 r = CONST_DOUBLE_REAL_VALUE (x);
4916 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4917 for (i = 0; i < 6; i++)
4919 if (real_identical (r, &values_68881[i]))
4920 return (codes_68881[i]);
/* The last table entry (1e16) is not exactly representable in SFmode,
   so only DFmode/XFmode constants may match it.  */
4923 if (GET_MODE (x) == SFmode)
4924 return 0;
4926 if (real_equal (r, &values_68881[6]))
4927 return (codes_68881[6]);
4929 /* larger powers of ten in the constants ram are not used
4930 because they are not equal to a `double' C constant. */
4931 return 0;
4934 /* If X is a floating-point constant, return the logarithm of X base 2,
4935 or 0 if X is not a power of 2. */
4938 floating_exact_log2 (rtx x)
4940 const REAL_VALUE_TYPE *r;
4941 REAL_VALUE_TYPE r1;
4942 int exp;
4944 r = CONST_DOUBLE_REAL_VALUE (x);
/* Values below 1.0 (including negatives) are never reported as powers
   of two here; 0 doubles as the "not a power of 2" return value.  */
4946 if (real_less (r, &dconst1))
4947 return 0;
/* X is a power of 2 iff it equals 2^exponent(X) exactly.  */
4949 exp = real_exponent (r);
4950 real_2expN (&r1, exp, DFmode);
4951 if (real_equal (&r1, r))
4952 return exp;
4954 return 0;
4957 /* A C compound statement to output to stdio stream STREAM the
4958 assembler syntax for an instruction operand X. X is an RTL
4959 expression.
4961 CODE is a value that can be used to specify one of several ways
4962 of printing the operand. It is used when identical operands
4963 must be printed differently depending on the context. CODE
4964 comes from the `%' specification that was used to request
4965 printing of the operand. If the specification was just `%DIGIT'
4966 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4967 is the ASCII code for LTR.
4969 If X is a register, this macro should print the register's name.
4970 The names can be found in an array `reg_names' whose type is
4971 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4973 When the machine description has a specification `%PUNCT' (a `%'
4974 followed by a punctuation character), this macro is called with
4975 a null pointer for X and the punctuation character for CODE.
4977 The m68k specific codes are:
4979 '.' for dot needed in Motorola-style opcode names.
4980 '-' for an operand pushing on the stack:
4981 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4982 '+' for an operand pushing on the stack:
4983 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4984 '@' for a reference to the top word on the stack:
4985 sp@, (sp) or (%sp) depending on the style of syntax.
4986 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4987 but & in SGS syntax).
4988 '!' for the cc register (used in an `and to cc' insn).
4989 '$' for the letter `s' in an op code, but only on the 68040.
4990 '&' for the letter `d' in an op code, but only on the 68040.
4991 '/' for register prefix needed by longlong.h.
4992 '?' for m68k_library_id_string
4994 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4995 'd' to force memory addressing to be absolute, not relative.
4996 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4997 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4998 or print pair of registers as rx:ry.
4999 'p' print an address with @PLTPC attached, but only if the operand
5000 is not locally-bound. */
5002 void
5003 print_operand (FILE *file, rtx op, int letter)
5005 if (op != NULL_RTX)
5006 m68k_adjust_decorated_operand (op);
/* Punctuation codes first: these take no (or an implicit) operand.  */
5008 if (letter == '.')
5010 if (MOTOROLA)
5011 fprintf (file, ".")
5013 else if (letter == '#')
5014 asm_fprintf (file, "%I");
5015 else if (letter == '-')
5016 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
5017 else if (letter == '+')
5018 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
5019 else if (letter == '@')
5020 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
5021 else if (letter == '!')
5022 asm_fprintf (file, "%Rfpcr");
5023 else if (letter == '$')
5025 if (TARGET_68040)
5026 fprintf (file, "s");
5028 else if (letter == '&')
5030 if (TARGET_68040)
5031 fprintf (file, "d");
5033 else if (letter == '/')
5034 asm_fprintf (file, "%R");
5035 else if (letter == '?')
5036 asm_fprintf (file, m68k_library_id_string);
5037 else if (letter == 'p')
5039 output_addr_const (file, op);
5040 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
5041 fprintf (file, "@PLTPC");
5043 else if (GET_CODE (op) == REG)
5045 if (letter == 'R')
5046 /* Print out the second register name of a register pair.
5047 I.e., R (6) => 7. */
5048 fputs (M68K_REGNAME(REGNO (op) + 1), file);
5049 else
5050 fputs (M68K_REGNAME(REGNO (op)), file);
5052 else if (GET_CODE (op) == MEM)
5054 output_address (GET_MODE (op), XEXP (op, 0));
/* %d forces a ".l"/":l" absolute-size suffix on pre-68020 targets for
   constant addresses outside the 16-bit absolute-short range.  */
5055 if (letter == 'd' && ! TARGET_68020
5056 && CONSTANT_ADDRESS_P (XEXP (op, 0))
5057 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
5058 && INTVAL (XEXP (op, 0)) < 0x8000
5059 && INTVAL (XEXP (op, 0)) >= -0x8000))
5060 fprintf (file, MOTOROLA ? ".l" : ":l");
/* Float constants are emitted as immediate hex target images.  */
5062 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
5064 long l;
5065 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5066 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
5068 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
5070 long l[3];
5071 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5072 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
5073 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
5075 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
5077 long l[2];
5078 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5079 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
5081 else
5083 /* Use `print_operand_address' instead of `output_addr_const'
5084 to ensure that we print relevant PIC stuff. */
5085 asm_fprintf (file, "%I");
5086 if (TARGET_PCREL
5087 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
5088 print_operand_address (file, op);
5089 else
5090 output_addr_const (file, op);
5094 /* Return string for TLS relocation RELOC. */
5096 static const char *
5097 m68k_get_reloc_decoration (enum m68k_reloc reloc)
5099 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
5100 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
5102 switch (reloc)
5104 case RELOC_GOT:
/* -fpic on 68020+ can use 16-bit GOT offsets; otherwise the default
   (32-bit) GOT form is used.  */
5105 if (MOTOROLA)
5107 if (flag_pic == 1 && TARGET_68020)
5108 return "@GOT.w"
5109 else
5110 return "@GOT";
5112 else
/* MIT syntax: explicit :w/:l size suffixes on 68020+, nothing
   otherwise.  */
5114 if (TARGET_68020)
5116 switch (flag_pic)
5118 case 1:
5119 return ":w";
5120 case 2:
5121 return ":l";
5122 default:
5123 return "";
5127 gcc_unreachable ();
5129 case RELOC_TLSGD:
5130 return "@TLSGD";
5132 case RELOC_TLSLDM:
5133 return "@TLSLDM";
5135 case RELOC_TLSLDO:
5136 return "@TLSLDO";
5138 case RELOC_TLSIE:
5139 return "@TLSIE";
5141 case RELOC_TLSLE:
5142 return "@TLSLE";
5144 default:
5145 gcc_unreachable ();
5149 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
5151 static bool
5152 m68k_output_addr_const_extra (FILE *file, rtx x)
5154 if (GET_CODE (x) == UNSPEC)
5156 switch (XINT (x, 1))
5158 case UNSPEC_RELOC16:
5159 case UNSPEC_RELOC32:
5160 output_addr_const (file, XVECEXP (x, 0, 0));
5161 fputs (m68k_get_reloc_decoration
5162 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
5163 return true;
5165 default:
5166 break;
5170 return false;
5173 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
5175 static void
5176 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
5178 gcc_assert (size == 4);
5179 fputs ("\t.long\t", file);
5180 output_addr_const (file, x);
5181 fputs ("@TLSLDO+0x8000", file);
5184 /* In the name of slightly smaller debug output, and to cater to
5185 general assembler lossage, recognize various UNSPEC sequences
5186 and turn them back into a direct symbol reference. */
5188 static rtx
5189 m68k_delegitimize_address (rtx orig_x)
5191 rtx x;
5192 struct m68k_address addr;
5193 rtx unspec;
5195 orig_x = delegitimize_mem_from_attrs (orig_x);
5196 x = orig_x;
5197 if (MEM_P (x))
5198 x = XEXP (x, 0);
/* Only PLUS addresses can hold the (const (unspec ...)) form we undo.  */
5200 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
5201 return orig_x;
5203 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
5204 || addr.offset == NULL_RTX
5205 || GET_CODE (addr.offset) != CONST)
5206 return orig_x;
/* The offset may be (unspec ...) or (plus (unspec ...) (const_int)).  */
5208 unspec = XEXP (addr.offset, 0);
5209 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
5210 unspec = XEXP (unspec, 0);
5211 if (GET_CODE (unspec) != UNSPEC
5212 || (XINT (unspec, 1) != UNSPEC_RELOC16
5213 && XINT (unspec, 1) != UNSPEC_RELOC32)
5214 return orig_x;
5215 x = XVECEXP (unspec, 0, 0);
5216 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
/* Re-apply any constant addend that sat alongside the unspec.  */
5217 if (unspec != XEXP (addr.offset, 0))
5218 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
/* Rebuild index (with scale) and base around the bare symbol.  */
5219 if (addr.index)
5221 rtx idx = addr.index;
5222 if (addr.scale != 1)
5223 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
5224 x = gen_rtx_PLUS (Pmode, idx, x);
5226 if (addr.base)
5227 x = gen_rtx_PLUS (Pmode, addr.base, x);
5228 if (MEM_P (orig_x))
5229 x = replace_equiv_address_nv (orig_x, x);
5230 return x;
5234 /* A C compound statement to output to stdio stream STREAM the
5235 assembler syntax for an instruction operand that is a memory
5236 reference whose address is ADDR. ADDR is an RTL expression.
5238 Note that this contains a kludge that knows that the only reason
5239 we have an address (plus (label_ref...) (reg...)) when not generating
5240 PIC code is in the insn before a tablejump, and we know that m68k.md
5241 generates a label LInnn: on such an insn.
5243 It is possible for PIC to generate a (plus (label_ref...) (reg...))
5244 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
5246 This routine is responsible for distinguishing between -fpic and -fPIC
5247 style relocations in an address. When generating -fpic code the
5248 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
5249 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
5251 void
5252 print_operand_address (FILE *file, rtx addr)
5254 struct m68k_address address;
5256 m68k_adjust_decorated_operand (addr);
5258 if (!m68k_decompose_address (QImode, addr, true, &address))
5259 gcc_unreachable ();
/* Auto-modify addressing modes print just the (decorated) base reg.  */
5261 if (address.code == PRE_DEC)
5262 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
5263 M68K_REGNAME (REGNO (address.base)));
5264 else if (address.code == POST_INC)
5265 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
5266 M68K_REGNAME (REGNO (address.base)));
5267 else if (!address.base && !address.index)
5269 /* A constant address. */
5270 gcc_assert (address.offset == addr);
5271 if (GET_CODE (addr) == CONST_INT)
5273 /* (xxx).w or (xxx).l. */
5274 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
5275 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
5276 else
5277 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
5279 else if (TARGET_PCREL)
5281 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
5282 fputc ('(', file);
5283 output_addr_const (file, addr);
5284 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
5286 else
5288 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
5289 name ends in `.<letter>', as the last 2 characters can be
5290 mistaken as a size suffix. Put the name in parentheses. */
5291 if (GET_CODE (addr) == SYMBOL_REF
5292 && strlen (XSTR (addr, 0)) > 2
5293 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
5295 putc ('(', file);
5296 output_addr_const (file, addr);
5297 putc (')', file);
5299 else
5300 output_addr_const (file, addr);
5303 else
/* General base/index/offset address; print it in Motorola or MIT
   syntax as configured.  */
5305 int labelno;
5307 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
5308 label being accessed, otherwise it is -1. */
5309 labelno = (address.offset
5310 && !address.base
5311 && GET_CODE (address.offset) == LABEL_REF
5312 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
5313 : -1);
5314 if (MOTOROLA)
5316 /* Print the "offset(base" component. */
5317 if (labelno >= 0)
5318 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
5319 else
5321 if (address.offset)
5322 output_addr_const (file, address.offset);
5324 putc ('(', file);
5325 if (address.base)
5326 fputs (M68K_REGNAME (REGNO (address.base)), file);
5328 /* Print the ",index" component, if any. */
5329 if (address.index)
5331 if (address.base)
5332 putc (',', file);
5333 fprintf (file, "%s.%c",
5334 M68K_REGNAME (REGNO (address.index)),
5335 GET_MODE (address.index) == HImode ? 'w' : 'l');
5336 if (address.scale != 1)
5337 fprintf (file, "*%d", address.scale);
5339 putc (')', file);
5341 else /* !MOTOROLA */
5343 if (!address.offset && !address.index)
5344 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
5345 else
5347 /* Print the "base@(offset" component. */
5348 if (labelno >= 0)
5349 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
5350 else
5352 if (address.base)
5353 fputs (M68K_REGNAME (REGNO (address.base)), file);
5354 fprintf (file, "@(");
5355 if (address.offset)
5356 output_addr_const (file, address.offset);
5358 /* Print the ",index" component, if any. */
5359 if (address.index)
5361 fprintf (file, ",%s:%c",
5362 M68K_REGNAME (REGNO (address.index)),
5363 GET_MODE (address.index) == HImode ? 'w' : 'l');
5364 if (address.scale != 1)
5365 fprintf (file, ":%d", address.scale);
5367 putc (')', file);
5373 /* Check for cases where a clr insn can be omitted from code using
5374 strict_low_part sets. For example, the second clrl here is not needed:
5375 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
5377 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
5378 insn we are checking for redundancy. TARGET is the register set by the
5379 clear insn. */
5381 bool
5382 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
5383 rtx target)
5385 rtx_insn *p = first_insn;
/* Scan backwards for an earlier full clear of TARGET, stopping at the
   start of the basic block or at anything we cannot analyze.  */
5387 while ((p = PREV_INSN (p)))
5389 if (NOTE_INSN_BASIC_BLOCK_P (p))
5390 return false;
5392 if (NOTE_P (p))
5393 continue;
5395 /* If it isn't an insn, then give up. */
5396 if (!INSN_P (p))
5397 return false;
5399 if (reg_set_p (target, p))
5401 rtx set = single_set (p);
5402 rtx dest;
5404 /* If it isn't an easy to recognize insn, then give up. */
5405 if (! set)
5406 return false;
5408 dest = SET_DEST (set);
5410 /* If this sets the entire target register to zero, then our
5411 first_insn is redundant. */
5412 if (rtx_equal_p (dest, target)
5413 && SET_SRC (set) == const0_rtx)
5414 return true;
5415 else if (GET_CODE (dest) == STRICT_LOW_PART
5416 && GET_CODE (XEXP (dest, 0)) == REG
5417 && REGNO (XEXP (dest, 0)) == REGNO (target)
5418 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
5419 <= GET_MODE_SIZE (mode)))
5420 /* This is a strict low part set which modifies less than
5421 we are using, so it is safe. */
5423 else
5424 return false;
5428 return false;
5431 /* Operand predicates for implementing asymmetric pc-relative addressing
5432 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
5433 when used as a source operand, but not as a destination operand.
5435 We model this by restricting the meaning of the basic predicates
5436 (general_operand, memory_operand, etc) to forbid the use of this
5437 addressing mode, and then define the following predicates that permit
5438 this addressing mode. These predicates can then be used for the
5439 source operands of the appropriate instructions.
5441 n.b. While it is theoretically possible to change all machine patterns
5442 to use this addressing mode where permitted by the architecture,
5443 it has only been implemented for "common" cases: SImode, HImode, and
5444 QImode operands, and only for the principal operations that would
5445 require this addressing mode: data movement and simple integer operations.
5447 In parallel with these new predicates, two new constraint letters
5448 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
5449 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
5450 In the pcrel case 's' is only valid in combination with 'a' registers.
5451 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
5452 of how these constraints are used.
5454 The use of these predicates is strictly optional, though patterns that
5455 don't will cause an extra reload register to be allocated where one
5456 was not necessary:
5458 lea (abc:w,%pc),%a0 ; need to reload address
5459 moveq &1,%d1 ; since write to pc-relative space
5460 movel %d1,%a0@ ; is not allowed
5462 lea (abc:w,%pc),%a1 ; no need to reload address here
5463 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
5465 For more info, consult tiemann@cygnus.com.
5468 All of the ugliness with predicates and constraints is due to the
5469 simple fact that the m68k does not allow a pc-relative addressing
5470 mode as a destination. gcc does not distinguish between source and
5471 destination addresses. Hence, if we claim that pc-relative address
5472 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
5473 end up with invalid code. To get around this problem, we left
5474 pc-relative modes as invalid addresses, and then added special
5475 predicates and constraints to accept them.
5477 A cleaner way to handle this is to modify gcc to distinguish
5478 between source and destination addresses. We can then say that
5479 pc-relative is a valid source address but not a valid destination
5480 address, and hopefully avoid a lot of the predicate and constraint
5481 hackery. Unfortunately, this would be a pretty big change. It would
5482 be a useful change for a number of ports, but there aren't any current
5483 plans to undertake this.
5485 ***************************************************************************/
/* Output an SImode AND of operands[2] into operands[0] (== operands[1]),
   choosing a narrower or single-bit instruction when the mask allows.
   Returns the assembler template; may rewrite operands[0..2] to match.  */
5488 const char *
5489 output_andsi3 (rtx *operands)
5491 int logval;
5492 CC_STATUS_INIT;
/* Mask with all upper 16 bits set: only the low word changes, so a
   word-sized AND (or clr.w when the low half of the mask is 0) works.  */
5493 if (GET_CODE (operands[2]) == CONST_INT
5494 && (INTVAL (operands[2]) | 0xffff) == -1
5495 && (DATA_REG_P (operands[0])
5496 || offsettable_memref_p (operands[0]))
5497 && !TARGET_COLDFIRE)
5499 if (GET_CODE (operands[0]) != REG)
5500 operands[0] = adjust_address (operands[0], HImode, 2);
5501 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
5502 if (operands[2] == const0_rtx)
5503 return "clr%.w %0";
5504 return "and%.w %2,%0";
/* Mask that clears exactly one bit: use bclr on that bit.  For memory
   operands, address the byte containing the bit.  */
5506 if (GET_CODE (operands[2]) == CONST_INT
5507 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
5508 && (DATA_REG_P (operands[0])
5509 || offsettable_memref_p (operands[0])))
5511 if (DATA_REG_P (operands[0]))
5512 operands[1] = GEN_INT (logval);
5513 else
5515 operands[0] = adjust_address (operands[0], QImode, 3 - (logval / 8));
5516 operands[1] = GEN_INT (logval % 8);
5518 return "bclr %1,%0";
5520 /* Only a standard logical operation on the whole word sets the
5521 condition codes in a way we can use. */
5522 if (!side_effects_p (operands[0]))
5523 flags_operand1 = operands[0];
5524 flags_valid = FLAGS_VALID_YES;
5525 return "and%.l %2,%0";
/* Output an SImode inclusive-OR of operands[2] into operands[0]
   (== operands[1]), choosing a narrower or single-bit instruction when
   the constant allows.  May rewrite operands[0..2] to match.  */
5528 const char *
5529 output_iorsi3 (rtx *operands)
5531 int logval;
5532 CC_STATUS_INIT;
/* Constant confined to the low 16 bits: a word-sized OR suffices
   (or mov.w when all 16 low bits are being set).  */
5533 if (GET_CODE (operands[2]) == CONST_INT
5534 && INTVAL (operands[2]) >> 16 == 0
5535 && (DATA_REG_P (operands[0])
5536 || offsettable_memref_p (operands[0]))
5537 && !TARGET_COLDFIRE)
5539 if (GET_CODE (operands[0]) != REG)
5540 operands[0] = adjust_address (operands[0], HImode, 2);
5541 if (INTVAL (operands[2]) == 0xffff)
5542 return "mov%.w %2,%0";
5543 return "or%.w %2,%0";
/* Constant with exactly one bit set: use bset on that bit.  For memory
   operands, address the byte containing the bit.  */
5545 if (GET_CODE (operands[2]) == CONST_INT
5546 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5547 && (DATA_REG_P (operands[0])
5548 || offsettable_memref_p (operands[0])))
5550 if (DATA_REG_P (operands[0]))
5551 operands[1] = GEN_INT (logval);
5552 else
5554 operands[0] = adjust_address (operands[0], QImode, 3 - (logval / 8));
5555 operands[1] = GEN_INT (logval % 8);
5557 return "bset %1,%0";
5559 /* Only a standard logical operation on the whole word sets the
5560 condition codes in a way we can use. */
5561 if (!side_effects_p (operands[0]))
5562 flags_operand1 = operands[0];
5563 flags_valid = FLAGS_VALID_YES;
5564 return "or%.l %2,%0";
/* Output an SImode exclusive-OR of operands[2] into operands[0]
   (== operands[1]), choosing a narrower or single-bit instruction when
   the constant allows.  May rewrite operands[0..2] to match.  */
5567 const char *
5568 output_xorsi3 (rtx *operands)
5570 int logval;
5571 CC_STATUS_INIT;
/* Constant confined to the low 16 bits: a word-sized EOR suffices
   (or not.w when all 16 low bits are being flipped).  */
5572 if (GET_CODE (operands[2]) == CONST_INT
5573 && INTVAL (operands[2]) >> 16 == 0
5574 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5575 && !TARGET_COLDFIRE)
5577 if (! DATA_REG_P (operands[0]))
5578 operands[0] = adjust_address (operands[0], HImode, 2);
5579 if (INTVAL (operands[2]) == 0xffff)
5580 return "not%.w %0";
5581 return "eor%.w %2,%0";
/* Constant with exactly one bit set: use bchg on that bit.  For memory
   operands, address the byte containing the bit.  */
5583 if (GET_CODE (operands[2]) == CONST_INT
5584 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5585 && (DATA_REG_P (operands[0])
5586 || offsettable_memref_p (operands[0])))
5588 if (DATA_REG_P (operands[0]))
5589 operands[1] = GEN_INT (logval);
5590 else
5592 operands[0] = adjust_address (operands[0], QImode, 3 - (logval / 8));
5593 operands[1] = GEN_INT (logval % 8);
5595 return "bchg %1,%0";
5597 /* Only a standard logical operation on the whole word sets the
5598 condition codes in a way we can use. */
5599 if (!side_effects_p (operands[0]))
5600 flags_operand1 = operands[0];
5601 flags_valid = FLAGS_VALID_YES;
5602 return "eor%.l %2,%0";
5605 /* Return the instruction that should be used for a call to address X,
5606 which is known to be in operand 0. */
5608 const char *
5609 output_call (rtx x)
5611 if (symbolic_operand (x, VOIDmode))
5612 return m68k_symbolic_call;
5613 else
5614 return "jsr %a0";
5617 /* Likewise sibling calls. */
5619 const char *
5620 output_sibcall (rtx x)
5622 if (symbolic_operand (x, VOIDmode))
5623 return m68k_symbolic_jump;
5624 else
5625 return "jmp %a0";
5628 static void
5629 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5630 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5631 tree function)
5633 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
5634 rtx this_slot, offset, addr, mem, tmp;
5635 rtx_insn *insn;
5637 /* Avoid clobbering the struct value reg by using the
5638 static chain reg as a temporary. */
5639 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5641 /* Pretend to be a post-reload pass while generating rtl. */
5642 reload_completed = 1;
5644 /* The "this" pointer is stored at 4(%sp). */
5645 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5646 stack_pointer_rtx, 4));
5648 /* Add DELTA to THIS. */
5649 if (delta != 0)
5651 /* Make the offset a legitimate operand for memory addition. */
5652 offset = GEN_INT (delta);
5653 if ((delta < -8 || delta > 8)
5654 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5656 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5657 offset = gen_rtx_REG (Pmode, D0_REG);
5659 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5660 copy_rtx (this_slot), offset));
5663 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5664 if (vcall_offset != 0)
5666 /* Set the static chain register to *THIS. */
5667 emit_move_insn (tmp, this_slot);
5668 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5670 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5671 addr = plus_constant (Pmode, tmp, vcall_offset);
5672 if (!m68k_legitimate_address_p (Pmode, addr, true))
5674 emit_insn (gen_rtx_SET (tmp, addr));
5675 addr = tmp;
5678 /* Load the offset into %d0 and add it to THIS. */
5679 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5680 gen_rtx_MEM (Pmode, addr));
5681 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5682 copy_rtx (this_slot),
5683 gen_rtx_REG (Pmode, D0_REG)));
5686 /* Jump to the target function. Use a sibcall if direct jumps are
5687 allowed, otherwise load the address into a register first. */
5688 mem = DECL_RTL (function);
5689 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5691 gcc_assert (flag_pic);
5693 if (!TARGET_SEP_DATA)
5695 /* Use the static chain register as a temporary (call-clobbered)
5696 GOT pointer for this function. We can use the static chain
5697 register because it isn't live on entry to the thunk. */
5698 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5699 emit_insn (gen_load_got (pic_offset_table_rtx));
5701 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5702 mem = replace_equiv_address (mem, tmp);
5704 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5705 SIBLING_CALL_P (insn) = 1;
5707 /* Run just enough of rest_of_compilation. */
5708 insn = get_insns ();
5709 split_all_insns_noflow ();
5710 assemble_start_function (thunk, fnname);
5711 final_start_function (insn, file, 1);
5712 final (insn, file, 1);
5713 final_end_function ();
5714 assemble_end_function (thunk, fnname);
5716 /* Clean up the vars set above. */
5717 reload_completed = 0;
5719 /* Restore the original PIC register. */
5720 if (flag_pic)
5721 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5724 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5726 static rtx
5727 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5728 int incoming ATTRIBUTE_UNUSED)
5730 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5733 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5735 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5736 unsigned int new_reg)
5739 /* Interrupt functions can only use registers that have already been
5740 saved by the prologue, even if they would normally be
5741 call-clobbered. */
5743 if ((m68k_get_function_kind (current_function_decl)
5744 == m68k_fk_interrupt_handler)
5745 && !df_regs_ever_live_p (new_reg))
5746 return 0;
5748 return 1;
5751 /* Implement TARGET_HARD_REGNO_NREGS.
5753 On the m68k, ordinary registers hold 32 bits worth;
5754 for the 68881 registers, a single register is always enough for
5755 anything that can be stored in them at all. */
5757 static unsigned int
5758 m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5760 if (regno >= 16)
5761 return GET_MODE_NUNITS (mode);
5762 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5765 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5766 registers can hold any mode, but restrict the 68881 registers to
5767 floating-point modes. */
5769 static bool
5770 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5772 if (DATA_REGNO_P (regno))
5774 /* Data Registers, can hold aggregate if fits in. */
5775 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5776 return true;
5778 else if (ADDRESS_REGNO_P (regno))
5780 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5781 return true;
5783 else if (FP_REGNO_P (regno))
5785 /* FPU registers, hold float or complex float of long double or
5786 smaller. */
5787 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5788 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5789 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5790 return true;
5792 return false;
5795 /* Implement TARGET_MODES_TIEABLE_P. */
5797 static bool
5798 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5800 return (!TARGET_HARD_FLOAT
5801 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5802 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5803 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5804 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5807 /* Implement SECONDARY_RELOAD_CLASS. */
5809 enum reg_class
5810 m68k_secondary_reload_class (enum reg_class rclass,
5811 machine_mode mode, rtx x)
5813 int regno;
5815 regno = true_regnum (x);
5817 /* If one operand of a movqi is an address register, the other
5818 operand must be a general register or constant. Other types
5819 of operand must be reloaded through a data register. */
5820 if (GET_MODE_SIZE (mode) == 1
5821 && reg_classes_intersect_p (rclass, ADDR_REGS)
5822 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5823 return DATA_REGS;
5825 /* PC-relative addresses must be loaded into an address register first. */
5826 if (TARGET_PCREL
5827 && !reg_class_subset_p (rclass, ADDR_REGS)
5828 && symbolic_operand (x, VOIDmode))
5829 return ADDR_REGS;
5831 return NO_REGS;
5834 /* Implement PREFERRED_RELOAD_CLASS. */
5836 enum reg_class
5837 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5839 enum reg_class secondary_class;
5841 /* If RCLASS might need a secondary reload, try restricting it to
5842 a class that doesn't. */
5843 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5844 if (secondary_class != NO_REGS
5845 && reg_class_subset_p (secondary_class, rclass))
5846 return secondary_class;
5848 /* Prefer to use moveq for in-range constants. */
5849 if (GET_CODE (x) == CONST_INT
5850 && reg_class_subset_p (DATA_REGS, rclass)
5851 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5852 return DATA_REGS;
5854 /* ??? Do we really need this now? */
5855 if (GET_CODE (x) == CONST_DOUBLE
5856 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5858 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5859 return FP_REGS;
5861 return NO_REGS;
5864 return rclass;
5867 /* Return floating point values in a 68881 register. This makes 68881 code
5868 a little bit faster. It also makes -msoft-float code incompatible with
5869 hard-float code, so people have to be careful not to mix the two.
5870 For ColdFire it was decided the ABI incompatibility is undesirable.
5871 If there is need for a hard-float ABI it is probably worth doing it
5872 properly and also passing function arguments in FP registers. */
5874 m68k_libcall_value (machine_mode mode)
5876 switch (mode) {
5877 case E_SFmode:
5878 case E_DFmode:
5879 case E_XFmode:
5880 if (TARGET_68881)
5881 return gen_rtx_REG (mode, FP0_REG);
5882 break;
5883 default:
5884 break;
5887 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5890 /* Location in which function value is returned.
5891 NOTE: Due to differences in ABIs, don't call this function directly,
5892 use FUNCTION_VALUE instead. */
5894 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5896 machine_mode mode;
5898 mode = TYPE_MODE (valtype);
5899 switch (mode) {
5900 case E_SFmode:
5901 case E_DFmode:
5902 case E_XFmode:
5903 if (TARGET_68881)
5904 return gen_rtx_REG (mode, FP0_REG);
5905 break;
5906 default:
5907 break;
5910 /* If the function returns a pointer, push that into %a0. */
5911 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5912 /* For compatibility with the large body of existing code which
5913 does not always properly declare external functions returning
5914 pointer types, the m68k/SVR4 convention is to copy the value
5915 returned for pointer functions from a0 to d0 in the function
5916 epilogue, so that callers that have neglected to properly
5917 declare the callee can still find the correct return value in
5918 d0. */
5919 return gen_rtx_PARALLEL
5920 (mode,
5921 gen_rtvec (2,
5922 gen_rtx_EXPR_LIST (VOIDmode,
5923 gen_rtx_REG (mode, A0_REG),
5924 const0_rtx),
5925 gen_rtx_EXPR_LIST (VOIDmode,
5926 gen_rtx_REG (mode, D0_REG),
5927 const0_rtx)));
5928 else if (POINTER_TYPE_P (valtype))
5929 return gen_rtx_REG (mode, A0_REG);
5930 else
5931 return gen_rtx_REG (mode, D0_REG);
/* Worker function for TARGET_RETURN_IN_MEMORY.  */
#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
static bool
m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  machine_mode mode = TYPE_MODE (type);

  /* BLKmode values always live in memory.  Beyond that, if TYPE's known
     alignment is less than the alignment of the mode that would contain
     the structure, return in memory: this keeps code compiled with
     -mstrict-align compatible with code compiled with -mno-strict-align.  */
  return (mode == BLKmode
	  || (AGGREGATE_TYPE_P (type)
	      && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode)));
}
#endif
5956 /* CPU to schedule the program for. */
5957 enum attr_cpu m68k_sched_cpu;
5959 /* MAC to schedule the program for. */
5960 enum attr_mac m68k_sched_mac;
/* Classification of one instruction operand, used by the ColdFire
   scheduler to derive instruction sizes and memory-access patterns.  */
enum attr_op_type
  {
    /* No operand.  */
    OP_TYPE_NONE,

    /* Integer register.  */
    OP_TYPE_RN,

    /* FP register.  */
    OP_TYPE_FPN,

    /* Implicit mem reference (e.g. stack).  */
    OP_TYPE_MEM1,

    /* Memory without offset or indexing.  EA modes 2, 3 and 4.  */
    OP_TYPE_MEM234,

    /* Memory with offset but without indexing.  EA mode 5.  */
    OP_TYPE_MEM5,

    /* Memory with indexing.  EA mode 6.  */
    OP_TYPE_MEM6,

    /* Memory referenced by absolute address.  EA mode 7.  */
    OP_TYPE_MEM7,

    /* Immediate operand that doesn't require extension word.  */
    OP_TYPE_IMM_Q,

    /* Immediate 16 bit operand.  */
    OP_TYPE_IMM_W,

    /* Immediate 32 bit operand.  */
    OP_TYPE_IMM_L
  };
5999 /* Return type of memory ADDR_RTX refers to. */
6000 static enum attr_op_type
6001 sched_address_type (machine_mode mode, rtx addr_rtx)
6003 struct m68k_address address;
6005 if (symbolic_operand (addr_rtx, VOIDmode))
6006 return OP_TYPE_MEM7;
6008 if (!m68k_decompose_address (mode, addr_rtx,
6009 reload_completed, &address))
6011 gcc_assert (!reload_completed);
6012 /* Reload will likely fix the address to be in the register. */
6013 return OP_TYPE_MEM234;
6016 if (address.scale != 0)
6017 return OP_TYPE_MEM6;
6019 if (address.base != NULL_RTX)
6021 if (address.offset == NULL_RTX)
6022 return OP_TYPE_MEM234;
6024 return OP_TYPE_MEM5;
6027 gcc_assert (address.offset != NULL_RTX);
6029 return OP_TYPE_MEM7;
6032 /* Return X or Y (depending on OPX_P) operand of INSN. */
6033 static rtx
6034 sched_get_operand (rtx_insn *insn, bool opx_p)
6036 int i;
6038 if (recog_memoized (insn) < 0)
6039 gcc_unreachable ();
6041 extract_constrain_insn_cached (insn);
6043 if (opx_p)
6044 i = get_attr_opx (insn);
6045 else
6046 i = get_attr_opy (insn);
6048 if (i >= recog_data.n_operands)
6049 return NULL;
6051 return recog_data.operand[i];
6054 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
6055 If ADDRESS_P is true, return type of memory location operand refers to. */
6056 static enum attr_op_type
6057 sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
6059 rtx op;
6061 op = sched_get_operand (insn, opx_p);
6063 if (op == NULL)
6065 gcc_assert (!reload_completed);
6066 return OP_TYPE_RN;
6069 if (address_p)
6070 return sched_address_type (QImode, op);
6072 if (memory_operand (op, VOIDmode))
6073 return sched_address_type (GET_MODE (op), XEXP (op, 0));
6075 if (register_operand (op, VOIDmode))
6077 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
6078 || (reload_completed && FP_REG_P (op)))
6079 return OP_TYPE_FPN;
6081 return OP_TYPE_RN;
6084 if (GET_CODE (op) == CONST_INT)
6086 int ival;
6088 ival = INTVAL (op);
6090 /* Check for quick constants. */
6091 switch (get_attr_type (insn))
6093 case TYPE_ALUQ_L:
6094 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
6095 return OP_TYPE_IMM_Q;
6097 gcc_assert (!reload_completed);
6098 break;
6100 case TYPE_MOVEQ_L:
6101 if (USE_MOVQ (ival))
6102 return OP_TYPE_IMM_Q;
6104 gcc_assert (!reload_completed);
6105 break;
6107 case TYPE_MOV3Q_L:
6108 if (valid_mov3q_const (ival))
6109 return OP_TYPE_IMM_Q;
6111 gcc_assert (!reload_completed);
6112 break;
6114 default:
6115 break;
6118 if (IN_RANGE (ival, -0x8000, 0x7fff))
6119 return OP_TYPE_IMM_W;
6121 return OP_TYPE_IMM_L;
6124 if (GET_CODE (op) == CONST_DOUBLE)
6126 switch (GET_MODE (op))
6128 case E_SFmode:
6129 return OP_TYPE_IMM_W;
6131 case E_VOIDmode:
6132 case E_DFmode:
6133 return OP_TYPE_IMM_L;
6135 default:
6136 gcc_unreachable ();
6140 if (GET_CODE (op) == CONST
6141 || symbolic_operand (op, VOIDmode)
6142 || LABEL_P (op))
6144 switch (GET_MODE (op))
6146 case E_QImode:
6147 return OP_TYPE_IMM_Q;
6149 case E_HImode:
6150 return OP_TYPE_IMM_W;
6152 case E_SImode:
6153 return OP_TYPE_IMM_L;
6155 default:
6156 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
6157 /* Just a guess. */
6158 return OP_TYPE_IMM_W;
6160 return OP_TYPE_IMM_L;
6164 gcc_assert (!reload_completed);
6166 if (FLOAT_MODE_P (GET_MODE (op)))
6167 return OP_TYPE_FPN;
6169 return OP_TYPE_RN;
6172 /* Implement opx_type attribute.
6173 Return type of INSN's operand X.
6174 If ADDRESS_P is true, return type of memory location operand refers to. */
6175 enum attr_opx_type
6176 m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
6178 switch (sched_attr_op_type (insn, true, address_p != 0))
6180 case OP_TYPE_RN:
6181 return OPX_TYPE_RN;
6183 case OP_TYPE_FPN:
6184 return OPX_TYPE_FPN;
6186 case OP_TYPE_MEM1:
6187 return OPX_TYPE_MEM1;
6189 case OP_TYPE_MEM234:
6190 return OPX_TYPE_MEM234;
6192 case OP_TYPE_MEM5:
6193 return OPX_TYPE_MEM5;
6195 case OP_TYPE_MEM6:
6196 return OPX_TYPE_MEM6;
6198 case OP_TYPE_MEM7:
6199 return OPX_TYPE_MEM7;
6201 case OP_TYPE_IMM_Q:
6202 return OPX_TYPE_IMM_Q;
6204 case OP_TYPE_IMM_W:
6205 return OPX_TYPE_IMM_W;
6207 case OP_TYPE_IMM_L:
6208 return OPX_TYPE_IMM_L;
6210 default:
6211 gcc_unreachable ();
6215 /* Implement opy_type attribute.
6216 Return type of INSN's operand Y.
6217 If ADDRESS_P is true, return type of memory location operand refers to. */
6218 enum attr_opy_type
6219 m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
6221 switch (sched_attr_op_type (insn, false, address_p != 0))
6223 case OP_TYPE_RN:
6224 return OPY_TYPE_RN;
6226 case OP_TYPE_FPN:
6227 return OPY_TYPE_FPN;
6229 case OP_TYPE_MEM1:
6230 return OPY_TYPE_MEM1;
6232 case OP_TYPE_MEM234:
6233 return OPY_TYPE_MEM234;
6235 case OP_TYPE_MEM5:
6236 return OPY_TYPE_MEM5;
6238 case OP_TYPE_MEM6:
6239 return OPY_TYPE_MEM6;
6241 case OP_TYPE_MEM7:
6242 return OPY_TYPE_MEM7;
6244 case OP_TYPE_IMM_Q:
6245 return OPY_TYPE_IMM_Q;
6247 case OP_TYPE_IMM_W:
6248 return OPY_TYPE_IMM_W;
6250 case OP_TYPE_IMM_L:
6251 return OPY_TYPE_IMM_L;
6253 default:
6254 gcc_unreachable ();
6258 /* Return size of INSN as int. */
6259 static int
6260 sched_get_attr_size_int (rtx_insn *insn)
6262 int size;
6264 switch (get_attr_type (insn))
6266 case TYPE_IGNORE:
6267 /* There should be no references to m68k_sched_attr_size for 'ignore'
6268 instructions. */
6269 gcc_unreachable ();
6270 return 0;
6272 case TYPE_MUL_L:
6273 size = 2;
6274 break;
6276 default:
6277 size = 1;
6278 break;
6281 switch (get_attr_opx_type (insn))
6283 case OPX_TYPE_NONE:
6284 case OPX_TYPE_RN:
6285 case OPX_TYPE_FPN:
6286 case OPX_TYPE_MEM1:
6287 case OPX_TYPE_MEM234:
6288 case OPY_TYPE_IMM_Q:
6289 break;
6291 case OPX_TYPE_MEM5:
6292 case OPX_TYPE_MEM6:
6293 /* Here we assume that most absolute references are short. */
6294 case OPX_TYPE_MEM7:
6295 case OPY_TYPE_IMM_W:
6296 ++size;
6297 break;
6299 case OPY_TYPE_IMM_L:
6300 size += 2;
6301 break;
6303 default:
6304 gcc_unreachable ();
6307 switch (get_attr_opy_type (insn))
6309 case OPY_TYPE_NONE:
6310 case OPY_TYPE_RN:
6311 case OPY_TYPE_FPN:
6312 case OPY_TYPE_MEM1:
6313 case OPY_TYPE_MEM234:
6314 case OPY_TYPE_IMM_Q:
6315 break;
6317 case OPY_TYPE_MEM5:
6318 case OPY_TYPE_MEM6:
6319 /* Here we assume that most absolute references are short. */
6320 case OPY_TYPE_MEM7:
6321 case OPY_TYPE_IMM_W:
6322 ++size;
6323 break;
6325 case OPY_TYPE_IMM_L:
6326 size += 2;
6327 break;
6329 default:
6330 gcc_unreachable ();
6333 if (size > 3)
6335 gcc_assert (!reload_completed);
6337 size = 3;
6340 return size;
6343 /* Return size of INSN as attribute enum value. */
6344 enum attr_size
6345 m68k_sched_attr_size (rtx_insn *insn)
6347 switch (sched_get_attr_size_int (insn))
6349 case 1:
6350 return SIZE_1;
6352 case 2:
6353 return SIZE_2;
6355 case 3:
6356 return SIZE_3;
6358 default:
6359 gcc_unreachable ();
6363 /* Return operand X or Y (depending on OPX_P) of INSN,
6364 if it is a MEM, or NULL overwise. */
6365 static enum attr_op_type
6366 sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
6368 if (opx_p)
6370 switch (get_attr_opx_type (insn))
6372 case OPX_TYPE_NONE:
6373 case OPX_TYPE_RN:
6374 case OPX_TYPE_FPN:
6375 case OPX_TYPE_IMM_Q:
6376 case OPX_TYPE_IMM_W:
6377 case OPX_TYPE_IMM_L:
6378 return OP_TYPE_RN;
6380 case OPX_TYPE_MEM1:
6381 case OPX_TYPE_MEM234:
6382 case OPX_TYPE_MEM5:
6383 case OPX_TYPE_MEM7:
6384 return OP_TYPE_MEM1;
6386 case OPX_TYPE_MEM6:
6387 return OP_TYPE_MEM6;
6389 default:
6390 gcc_unreachable ();
6393 else
6395 switch (get_attr_opy_type (insn))
6397 case OPY_TYPE_NONE:
6398 case OPY_TYPE_RN:
6399 case OPY_TYPE_FPN:
6400 case OPY_TYPE_IMM_Q:
6401 case OPY_TYPE_IMM_W:
6402 case OPY_TYPE_IMM_L:
6403 return OP_TYPE_RN;
6405 case OPY_TYPE_MEM1:
6406 case OPY_TYPE_MEM234:
6407 case OPY_TYPE_MEM5:
6408 case OPY_TYPE_MEM7:
6409 return OP_TYPE_MEM1;
6411 case OPY_TYPE_MEM6:
6412 return OP_TYPE_MEM6;
6414 default:
6415 gcc_unreachable ();
6420 /* Implement op_mem attribute. */
6421 enum attr_op_mem
6422 m68k_sched_attr_op_mem (rtx_insn *insn)
6424 enum attr_op_type opx;
6425 enum attr_op_type opy;
6427 opx = sched_get_opxy_mem_type (insn, true);
6428 opy = sched_get_opxy_mem_type (insn, false);
6430 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
6431 return OP_MEM_00;
6433 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
6435 switch (get_attr_opx_access (insn))
6437 case OPX_ACCESS_R:
6438 return OP_MEM_10;
6440 case OPX_ACCESS_W:
6441 return OP_MEM_01;
6443 case OPX_ACCESS_RW:
6444 return OP_MEM_11;
6446 default:
6447 gcc_unreachable ();
6451 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
6453 switch (get_attr_opx_access (insn))
6455 case OPX_ACCESS_R:
6456 return OP_MEM_I0;
6458 case OPX_ACCESS_W:
6459 return OP_MEM_0I;
6461 case OPX_ACCESS_RW:
6462 return OP_MEM_I1;
6464 default:
6465 gcc_unreachable ();
6469 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
6470 return OP_MEM_10;
6472 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
6474 switch (get_attr_opx_access (insn))
6476 case OPX_ACCESS_W:
6477 return OP_MEM_11;
6479 default:
6480 gcc_assert (!reload_completed);
6481 return OP_MEM_11;
6485 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
6487 switch (get_attr_opx_access (insn))
6489 case OPX_ACCESS_W:
6490 return OP_MEM_1I;
6492 default:
6493 gcc_assert (!reload_completed);
6494 return OP_MEM_1I;
6498 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
6499 return OP_MEM_I0;
6501 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
6503 switch (get_attr_opx_access (insn))
6505 case OPX_ACCESS_W:
6506 return OP_MEM_I1;
6508 default:
6509 gcc_assert (!reload_completed);
6510 return OP_MEM_I1;
6514 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
6515 gcc_assert (!reload_completed);
6516 return OP_MEM_I1;
6519 /* Data for ColdFire V4 index bypass.
6520 Producer modifies register that is used as index in consumer with
6521 specified scale. */
6522 static struct
6524 /* Producer instruction. */
6525 rtx pro;
6527 /* Consumer instruction. */
6528 rtx con;
6530 /* Scale of indexed memory access within consumer.
6531 Or zero if bypass should not be effective at the moment. */
6532 int scale;
6533 } sched_cfv4_bypass_data;
6535 /* An empty state that is used in m68k_sched_adjust_cost. */
6536 static state_t sched_adjust_cost_state;
6538 /* Implement adjust_cost scheduler hook.
6539 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
6540 static int
6541 m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
6542 unsigned int)
6544 int delay;
6546 if (recog_memoized (def_insn) < 0
6547 || recog_memoized (insn) < 0)
6548 return cost;
6550 if (sched_cfv4_bypass_data.scale == 1)
6551 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
6553 /* haifa-sched.cc: insn_cost () calls bypass_p () just before
6554 targetm.sched.adjust_cost (). Hence, we can be relatively sure
6555 that the data in sched_cfv4_bypass_data is up to date. */
6556 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
6557 && sched_cfv4_bypass_data.con == insn);
6559 if (cost < 3)
6560 cost = 3;
6562 sched_cfv4_bypass_data.pro = NULL;
6563 sched_cfv4_bypass_data.con = NULL;
6564 sched_cfv4_bypass_data.scale = 0;
6566 else
6567 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6568 && sched_cfv4_bypass_data.con == NULL
6569 && sched_cfv4_bypass_data.scale == 0);
6571 /* Don't try to issue INSN earlier than DFA permits.
6572 This is especially useful for instructions that write to memory,
6573 as their true dependence (default) latency is better to be set to 0
6574 to workaround alias analysis limitations.
6575 This is, in fact, a machine independent tweak, so, probably,
6576 it should be moved to haifa-sched.cc: insn_cost (). */
6577 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
6578 if (delay > cost)
6579 cost = delay;
6581 return cost;
6584 /* Return maximal number of insns that can be scheduled on a single cycle. */
6585 static int
6586 m68k_sched_issue_rate (void)
6588 switch (m68k_sched_cpu)
6590 case CPU_CFV1:
6591 case CPU_CFV2:
6592 case CPU_CFV3:
6593 return 1;
6595 case CPU_CFV4:
6596 return 2;
6598 default:
6599 gcc_unreachable ();
6600 return 0;
6604 /* Maximal length of instruction for current CPU.
6605 E.g. it is 3 for any ColdFire core. */
6606 static int max_insn_size;
6608 /* Data to model instruction buffer of CPU. */
6609 struct _sched_ib
6611 /* True if instruction buffer model is modeled for current CPU. */
6612 bool enabled_p;
6614 /* Size of the instruction buffer in words. */
6615 int size;
6617 /* Number of filled words in the instruction buffer. */
6618 int filled;
6620 /* Additional information about instruction buffer for CPUs that have
6621 a buffer of instruction records, rather then a plain buffer
6622 of instruction words. */
6623 struct _sched_ib_records
6625 /* Size of buffer in records. */
6626 int n_insns;
6628 /* Array to hold data on adjustments made to the size of the buffer. */
6629 int *adjust;
6631 /* Index of the above array. */
6632 int adjust_index;
6633 } records;
6635 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6636 rtx insn;
6639 static struct _sched_ib sched_ib;
6641 /* ID of memory unit. */
6642 static int sched_mem_unit_code;
6644 /* Implementation of the targetm.sched.variable_issue () hook.
6645 It is called after INSN was issued. It returns the number of insns
6646 that can possibly get scheduled on the current cycle.
6647 It is used here to determine the effect of INSN on the instruction
6648 buffer. */
6649 static int
6650 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6651 int sched_verbose ATTRIBUTE_UNUSED,
6652 rtx_insn *insn, int can_issue_more)
6654 int insn_size;
6656 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6658 switch (m68k_sched_cpu)
6660 case CPU_CFV1:
6661 case CPU_CFV2:
6662 insn_size = sched_get_attr_size_int (insn);
6663 break;
6665 case CPU_CFV3:
6666 insn_size = sched_get_attr_size_int (insn);
6668 /* ColdFire V3 and V4 cores have instruction buffers that can
6669 accumulate up to 8 instructions regardless of instructions'
6670 sizes. So we should take care not to "prefetch" 24 one-word
6671 or 12 two-words instructions.
6672 To model this behavior we temporarily decrease size of the
6673 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6675 int adjust;
6677 adjust = max_insn_size - insn_size;
6678 sched_ib.size -= adjust;
6680 if (sched_ib.filled > sched_ib.size)
6681 sched_ib.filled = sched_ib.size;
6683 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6686 ++sched_ib.records.adjust_index;
6687 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6688 sched_ib.records.adjust_index = 0;
6690 /* Undo adjustment we did 7 instructions ago. */
6691 sched_ib.size
6692 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6694 break;
6696 case CPU_CFV4:
6697 gcc_assert (!sched_ib.enabled_p);
6698 insn_size = 0;
6699 break;
6701 default:
6702 gcc_unreachable ();
6705 if (insn_size > sched_ib.filled)
6706 /* Scheduling for register pressure does not always take DFA into
6707 account. Workaround instruction buffer not being filled enough. */
6709 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6710 insn_size = sched_ib.filled;
6713 --can_issue_more;
6715 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6716 || asm_noperands (PATTERN (insn)) >= 0)
6717 insn_size = sched_ib.filled;
6718 else
6719 insn_size = 0;
6721 sched_ib.filled -= insn_size;
6723 return can_issue_more;
6726 /* Return how many instructions should scheduler lookahead to choose the
6727 best one. */
6728 static int
6729 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6731 return m68k_sched_issue_rate () - 1;
6734 /* Implementation of targetm.sched.init_global () hook.
6735 It is invoked once per scheduling pass and is used here
6736 to initialize scheduler constants. */
6737 static void
6738 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6739 int sched_verbose ATTRIBUTE_UNUSED,
6740 int n_insns ATTRIBUTE_UNUSED)
6742 /* Check that all instructions have DFA reservations and
6743 that all instructions can be issued from a clean state. */
6744 if (flag_checking)
6746 rtx_insn *insn;
6747 state_t state;
6749 state = alloca (state_size ());
6751 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6753 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6755 gcc_assert (insn_has_dfa_reservation_p (insn));
6757 state_reset (state);
6758 if (state_transition (state, insn) >= 0)
6759 gcc_unreachable ();
6764 /* Setup target cpu. */
6766 /* ColdFire V4 has a set of features to keep its instruction buffer full
6767 (e.g., a separate memory bus for instructions) and, hence, we do not model
6768 buffer for this CPU. */
6769 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6771 switch (m68k_sched_cpu)
6773 case CPU_CFV4:
6774 sched_ib.filled = 0;
6776 /* FALLTHRU */
6778 case CPU_CFV1:
6779 case CPU_CFV2:
6780 max_insn_size = 3;
6781 sched_ib.records.n_insns = 0;
6782 sched_ib.records.adjust = NULL;
6783 break;
6785 case CPU_CFV3:
6786 max_insn_size = 3;
6787 sched_ib.records.n_insns = 8;
6788 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6789 break;
6791 default:
6792 gcc_unreachable ();
6795 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6797 sched_adjust_cost_state = xmalloc (state_size ());
6798 state_reset (sched_adjust_cost_state);
6800 start_sequence ();
6801 emit_insn (gen_ib ());
6802 sched_ib.insn = get_insns ();
6803 end_sequence ();
6806 /* Scheduling pass is now finished. Free/reset static variables. */
6807 static void
6808 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6809 int verbose ATTRIBUTE_UNUSED)
6811 sched_ib.insn = NULL;
6813 free (sched_adjust_cost_state);
6814 sched_adjust_cost_state = NULL;
6816 sched_mem_unit_code = 0;
6818 free (sched_ib.records.adjust);
6819 sched_ib.records.adjust = NULL;
6820 sched_ib.records.n_insns = 0;
6821 max_insn_size = 0;
6824 /* Implementation of targetm.sched.init () hook.
6825 It is invoked each time scheduler starts on the new block (basic block or
6826 extended basic block). */
6827 static void
6828 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6829 int sched_verbose ATTRIBUTE_UNUSED,
6830 int n_insns ATTRIBUTE_UNUSED)
6832 switch (m68k_sched_cpu)
6834 case CPU_CFV1:
6835 case CPU_CFV2:
6836 sched_ib.size = 6;
6837 break;
6839 case CPU_CFV3:
6840 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
6842 memset (sched_ib.records.adjust, 0,
6843 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6844 sched_ib.records.adjust_index = 0;
6845 break;
6847 case CPU_CFV4:
6848 gcc_assert (!sched_ib.enabled_p);
6849 sched_ib.size = 0;
6850 break;
6852 default:
6853 gcc_unreachable ();
6856 if (sched_ib.enabled_p)
6857 /* haifa-sched.cc: schedule_block () calls advance_cycle () just before
6858 the first cycle. Workaround that. */
6859 sched_ib.filled = -2;
6862 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6863 It is invoked just before current cycle finishes and is used here
6864 to track if instruction buffer got its two words this cycle. */
6865 static void
6866 m68k_sched_dfa_pre_advance_cycle (void)
6868 if (!sched_ib.enabled_p)
6869 return;
6871 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6873 sched_ib.filled += 2;
6875 if (sched_ib.filled > sched_ib.size)
6876 sched_ib.filled = sched_ib.size;
6880 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6881 It is invoked just after new cycle begins and is used here
6882 to setup number of filled words in the instruction buffer so that
6883 instructions which won't have all their words prefetched would be
6884 stalled for a cycle. */
6885 static void
6886 m68k_sched_dfa_post_advance_cycle (void)
6888 int i;
6890 if (!sched_ib.enabled_p)
6891 return;
6893 /* Setup number of prefetched instruction words in the instruction
6894 buffer. */
6895 i = max_insn_size - sched_ib.filled;
6897 while (--i >= 0)
6899 if (state_transition (curr_state, sched_ib.insn) >= 0)
6900 /* Pick up scheduler state. */
6901 ++sched_ib.filled;
6905 /* Return X or Y (depending on OPX_P) operand of INSN,
6906 if it is an integer register, or NULL overwise. */
6907 static rtx
6908 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6910 rtx op = NULL;
6912 if (opx_p)
6914 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6916 op = sched_get_operand (insn, true);
6917 gcc_assert (op != NULL);
6919 if (!reload_completed && !REG_P (op))
6920 return NULL;
6923 else
6925 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6927 op = sched_get_operand (insn, false);
6928 gcc_assert (op != NULL);
6930 if (!reload_completed && !REG_P (op))
6931 return NULL;
6935 return op;
6938 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6939 is a MEM. */
6940 static bool
6941 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6943 switch (sched_get_opxy_mem_type (insn, opx_p))
6945 case OP_TYPE_MEM1:
6946 case OP_TYPE_MEM6:
6947 return true;
6949 default:
6950 return false;
6954 /* Return X or Y (depending on OPX_P) operand of INSN,
6955 if it is a MEM, or NULL overwise. */
6956 static rtx
6957 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6959 bool opx_p;
6960 bool opy_p;
6962 opx_p = false;
6963 opy_p = false;
6965 if (must_read_p)
6967 opx_p = true;
6968 opy_p = true;
6971 if (must_write_p)
6973 opx_p = true;
6974 opy_p = false;
6977 if (opy_p && sched_mem_operand_p (insn, false))
6978 return sched_get_operand (insn, false);
6980 if (opx_p && sched_mem_operand_p (insn, true))
6981 return sched_get_operand (insn, true);
6983 gcc_unreachable ();
6984 return NULL;
6987 /* Return non-zero if PRO modifies register used as part of
6988 address in CON. */
6990 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6992 rtx pro_x;
6993 rtx con_mem_read;
6995 pro_x = sched_get_reg_operand (pro, true);
6996 if (pro_x == NULL)
6997 return 0;
6999 con_mem_read = sched_get_mem_operand (con, true, false);
7000 gcc_assert (con_mem_read != NULL);
7002 if (reg_mentioned_p (pro_x, con_mem_read))
7003 return 1;
7005 return 0;
7008 /* Helper function for m68k_sched_indexed_address_bypass_p.
7009 if PRO modifies register used as index in CON,
7010 return scale of indexed memory access in CON. Return zero overwise. */
7011 static int
7012 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
7014 rtx reg;
7015 rtx mem;
7016 struct m68k_address address;
7018 reg = sched_get_reg_operand (pro, true);
7019 if (reg == NULL)
7020 return 0;
7022 mem = sched_get_mem_operand (con, true, false);
7023 gcc_assert (mem != NULL && MEM_P (mem));
7025 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
7026 &address))
7027 gcc_unreachable ();
7029 if (REGNO (reg) == REGNO (address.index))
7031 gcc_assert (address.scale != 0);
7032 return address.scale;
7035 return 0;
7038 /* Return non-zero if PRO modifies register used
7039 as index with scale 2 or 4 in CON. */
7041 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
7043 gcc_assert (sched_cfv4_bypass_data.pro == NULL
7044 && sched_cfv4_bypass_data.con == NULL
7045 && sched_cfv4_bypass_data.scale == 0);
7047 switch (sched_get_indexed_address_scale (pro, con))
7049 case 1:
7050 /* We can't have a variable latency bypass, so
7051 remember to adjust the insn cost in adjust_cost hook. */
7052 sched_cfv4_bypass_data.pro = pro;
7053 sched_cfv4_bypass_data.con = con;
7054 sched_cfv4_bypass_data.scale = 1;
7055 return 0;
7057 case 2:
7058 case 4:
7059 return 1;
7061 default:
7062 return 0;
7066 /* We generate a two-instructions program at M_TRAMP :
7067 movea.l &CHAIN_VALUE,%a0
7068 jmp FNADDR
7069 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
7071 static void
7072 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
7074 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
7075 rtx mem;
7077 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
7079 mem = adjust_address (m_tramp, HImode, 0);
7080 emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
7081 mem = adjust_address (m_tramp, SImode, 2);
7082 emit_move_insn (mem, chain_value);
7084 mem = adjust_address (m_tramp, HImode, 6);
7085 emit_move_insn (mem, GEN_INT(0x4EF9));
7086 mem = adjust_address (m_tramp, SImode, 8);
7087 emit_move_insn (mem, fnaddr);
7089 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
7092 /* On the 68000, the RTS insn cannot pop anything.
7093 On the 68010, the RTD insn may be used to pop them if the number
7094 of args is fixed, but if the number is variable then the caller
7095 must pop them all. RTD can't be used for library calls now
7096 because the library is compiled with the Unix compiler.
7097 Use of RTD is a selectable option, since it is incompatible with
7098 standard Unix calling sequences. If the option is not selected,
7099 the caller must always pop the args. */
7101 static poly_int64
7102 m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
7104 return ((TARGET_RTD
7105 && (!fundecl
7106 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
7107 && (!stdarg_p (funtype)))
7108 ? (HOST_WIDE_INT) size : 0);
7111 /* Make sure everything's fine if we *don't* have a given processor.
7112 This assumes that putting a register in fixed_regs will keep the
7113 compiler's mitts completely off it. We don't bother to zero it out
7114 of register classes. */
7116 static void
7117 m68k_conditional_register_usage (void)
7119 int i;
7120 HARD_REG_SET x;
7121 if (!TARGET_HARD_FLOAT)
7123 x = reg_class_contents[FP_REGS];
7124 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7125 if (TEST_HARD_REG_BIT (x, i))
7126 fixed_regs[i] = call_used_regs[i] = 1;
7128 if (flag_pic)
7129 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
7132 static void
7133 m68k_init_sync_libfuncs (void)
7135 init_sync_libfuncs (UNITS_PER_WORD);
7138 /* Implements EPILOGUE_USES. All registers are live on exit from an
7139 interrupt routine. */
7140 bool
7141 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
7143 return (reload_completed
7144 && (m68k_get_function_kind (current_function_decl)
7145 == m68k_fk_interrupt_handler));
7149 /* Implement TARGET_C_EXCESS_PRECISION.
7151 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
7152 instructions, we get proper intermediate rounding, otherwise we
7153 get extended precision results. */
7155 static enum flt_eval_method
7156 m68k_excess_precision (enum excess_precision_type type)
7158 switch (type)
7160 case EXCESS_PRECISION_TYPE_FAST:
7161 /* The fastest type to promote to will always be the native type,
7162 whether that occurs with implicit excess precision or
7163 otherwise. */
7164 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
7165 case EXCESS_PRECISION_TYPE_STANDARD:
7166 case EXCESS_PRECISION_TYPE_IMPLICIT:
7167 /* Otherwise, the excess precision we want when we are
7168 in a standards compliant mode, and the implicit precision we
7169 provide can be identical. */
7170 if (TARGET_68040 || ! TARGET_68881)
7171 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
7173 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
7174 case EXCESS_PRECISION_TYPE_FLOAT16:
7175 error ("%<-fexcess-precision=16%> is not supported on this target");
7176 break;
7177 default:
7178 gcc_unreachable ();
7180 return FLT_EVAL_METHOD_UNPREDICTABLE;
7183 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
7184 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
7186 poly_int64
7187 m68k_push_rounding (poly_int64 bytes)
7189 if (TARGET_COLDFIRE)
7190 return bytes;
7191 return (bytes + 1) & ~1;
7194 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
7196 static machine_mode
7197 m68k_promote_function_mode (const_tree type, machine_mode mode,
7198 int *punsignedp ATTRIBUTE_UNUSED,
7199 const_tree fntype ATTRIBUTE_UNUSED,
7200 int for_return)
7202 /* Promote libcall arguments narrower than int to match the normal C
7203 ABI (for which promotions are handled via
7204 TARGET_PROMOTE_PROTOTYPES). */
7205 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode))
7206 return SImode;
7207 return mode;
7210 /* Implement TARGET_ZERO_CALL_USED_REGS. */
7212 static HARD_REG_SET
7213 m68k_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
7215 rtx zero_fpreg = NULL_RTX;
7217 for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7218 if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
7220 rtx reg, zero;
7222 if (INT_REGNO_P (regno))
7224 reg = regno_reg_rtx[regno];
7225 zero = CONST0_RTX (SImode);
7227 else if (FP_REGNO_P (regno))
7229 reg = gen_raw_REG (SFmode, regno);
7230 if (zero_fpreg == NULL_RTX)
7232 /* On the 040/060 clearing an FP reg loads a large
7233 immediate. To reduce code size use the first
7234 cleared FP reg to clear remaining ones. Don't do
7235 this on cores which use fmovecr. */
7236 zero = CONST0_RTX (SFmode);
7237 if (TUNE_68040_60)
7238 zero_fpreg = reg;
7240 else
7241 zero = zero_fpreg;
7243 else
7244 gcc_unreachable ();
7246 emit_move_insn (reg, zero);
7249 return need_zeroed_hardregs;
7252 /* Implement TARGET_C_MODE_FOR_FLOATING_TYPE. Return XFmode or DFmode
7253 for TI_LONG_DOUBLE_TYPE which is for long double type, go with the
7254 default one for the others. */
7256 static machine_mode
7257 m68k_c_mode_for_floating_type (enum tree_index ti)
7259 if (ti == TI_LONG_DOUBLE_TYPE)
7260 return LONG_DOUBLE_TYPE_MODE;
7261 return default_mode_for_floating_type (ti);
7264 /* Implement TARGET_LRA_P. */
7266 static bool
7267 m68k_use_lra_p ()
7269 return m68k_lra_p;
7272 #include "gt-m68k.h"