libcpp, c, middle-end: Optimize initializers using #embed in C
[official-gcc.git] / gcc / config / m68k / m68k.cc
blob729a1e8875d9681b43edd77d8c9005687c5461c3
1 /* Subroutines for insn-output.cc for Motorola 68000 family.
2 Copyright (C) 1987-2024 Free Software Foundation, Inc.
4 This file is part of GCC.
6 GCC is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
9 any later version.
11 GCC is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
20 #define IN_TARGET_CODE 1
22 #include "config.h"
23 #define INCLUDE_STRING
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "cfghooks.h"
28 #include "tree.h"
29 #include "stringpool.h"
30 #include "attribs.h"
31 #include "rtl.h"
32 #include "df.h"
33 #include "alias.h"
34 #include "fold-const.h"
35 #include "calls.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "regs.h"
39 #include "insn-config.h"
40 #include "conditions.h"
41 #include "output.h"
42 #include "insn-attr.h"
43 #include "recog.h"
44 #include "diagnostic-core.h"
45 #include "flags.h"
46 #include "expmed.h"
47 #include "dojump.h"
48 #include "explow.h"
49 #include "memmodel.h"
50 #include "emit-rtl.h"
51 #include "stmt.h"
52 #include "expr.h"
53 #include "reload.h"
54 #include "tm_p.h"
55 #include "target.h"
56 #include "debug.h"
57 #include "cfgrtl.h"
58 #include "cfganal.h"
59 #include "lcm.h"
60 #include "cfgbuild.h"
61 #include "cfgcleanup.h"
62 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
63 #include "sched-int.h"
64 #include "insn-codes.h"
65 #include "opts.h"
66 #include "optabs.h"
67 #include "builtins.h"
68 #include "rtl-iter.h"
69 #include "toplev.h"
71 /* This file should be included last. */
72 #include "target-def.h"
74 enum reg_class regno_reg_class[] =
76 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
77 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
78 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
79 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
80 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
81 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
82 ADDR_REGS
86 /* The minimum number of integer registers that we want to save with the
87 movem instruction. Using two movel instructions instead of a single
88 moveml is about 15% faster for the 68020 and 68030 at no expense in
89 code size. */
90 #define MIN_MOVEM_REGS 3
92 /* The minimum number of floating point registers that we want to save
93 with the fmovem instruction. */
94 #define MIN_FMOVEM_REGS 1
96 /* Structure describing stack frame layout. */
97 struct m68k_frame
99 /* Stack pointer to frame pointer offset. */
100 HOST_WIDE_INT offset;
102 /* Offset of FPU registers. */
103 HOST_WIDE_INT foffset;
105 /* Frame size in bytes (rounded up). */
106 HOST_WIDE_INT size;
108 /* Data and address register. */
109 int reg_no;
110 unsigned int reg_mask;
112 /* FPU registers. */
113 int fpu_no;
114 unsigned int fpu_mask;
116 /* Offsets relative to ARG_POINTER. */
117 HOST_WIDE_INT frame_pointer_offset;
118 HOST_WIDE_INT stack_pointer_offset;
120 /* Function which the above information refers to. */
121 int funcdef_no;
124 /* Current frame information calculated by m68k_compute_frame_layout(). */
125 static struct m68k_frame current_frame;
127 /* Structure describing an m68k address.
129 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
130 with null fields evaluating to 0. Here:
132 - BASE satisfies m68k_legitimate_base_reg_p
133 - INDEX satisfies m68k_legitimate_index_reg_p
134 - OFFSET satisfies m68k_legitimate_constant_address_p
136 INDEX is either HImode or SImode. The other fields are SImode.
138 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
139 the address is (BASE)+. */
140 struct m68k_address {
141 enum rtx_code code;
142 rtx base;
143 rtx index;
144 rtx offset;
145 int scale;
148 static int m68k_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int,
149 unsigned int);
150 static int m68k_sched_issue_rate (void);
151 static int m68k_sched_variable_issue (FILE *, int, rtx_insn *, int);
152 static void m68k_sched_md_init_global (FILE *, int, int);
153 static void m68k_sched_md_finish_global (FILE *, int);
154 static void m68k_sched_md_init (FILE *, int, int);
155 static void m68k_sched_dfa_pre_advance_cycle (void);
156 static void m68k_sched_dfa_post_advance_cycle (void);
157 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
159 static bool m68k_can_eliminate (const int, const int);
160 static void m68k_conditional_register_usage (void);
161 static bool m68k_legitimate_address_p (machine_mode, rtx, bool,
162 code_helper = ERROR_MARK);
163 static void m68k_option_override (void);
164 static void m68k_override_options_after_change (void);
165 static rtx find_addr_reg (rtx);
166 static const char *singlemove_string (rtx *);
167 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
168 HOST_WIDE_INT, tree);
169 static rtx m68k_struct_value_rtx (tree, int);
170 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
171 tree args, int flags,
172 bool *no_add_attrs);
173 static void m68k_compute_frame_layout (void);
174 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
175 static bool m68k_ok_for_sibcall_p (tree, tree);
176 static bool m68k_tls_symbol_p (rtx);
177 static rtx m68k_legitimize_address (rtx, rtx, machine_mode);
178 static bool m68k_rtx_costs (rtx, machine_mode, int, int, int *, bool);
179 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
180 static bool m68k_return_in_memory (const_tree, const_tree);
181 #endif
182 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
183 static void m68k_trampoline_init (rtx, tree, rtx);
184 static poly_int64 m68k_return_pops_args (tree, tree, poly_int64);
185 static rtx m68k_delegitimize_address (rtx);
186 static void m68k_function_arg_advance (cumulative_args_t,
187 const function_arg_info &);
188 static rtx m68k_function_arg (cumulative_args_t, const function_arg_info &);
189 static bool m68k_cannot_force_const_mem (machine_mode mode, rtx x);
190 static bool m68k_output_addr_const_extra (FILE *, rtx);
191 static void m68k_init_sync_libfuncs (void) ATTRIBUTE_UNUSED;
192 static enum flt_eval_method
193 m68k_excess_precision (enum excess_precision_type);
194 static unsigned int m68k_hard_regno_nregs (unsigned int, machine_mode);
195 static bool m68k_hard_regno_mode_ok (unsigned int, machine_mode);
196 static bool m68k_modes_tieable_p (machine_mode, machine_mode);
197 static machine_mode m68k_promote_function_mode (const_tree, machine_mode,
198 int *, const_tree, int);
199 static void m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int);
200 static HARD_REG_SET m68k_zero_call_used_regs (HARD_REG_SET);
201 static machine_mode m68k_c_mode_for_floating_type (enum tree_index);
202 static bool m68k_use_lra_p (void);
/* Initialize the GCC target structure.  */

/* Integer directive spellings depend on the assembler dialect.  */
#if INT_OP_GROUP == INT_OP_DOT_WORD
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
#endif

#if INT_OP_GROUP == INT_OP_NO_DOT
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tbyte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
#endif

#if INT_OP_GROUP == INT_OP_DC
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdc.b\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
#endif

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_ASM_FILE_START_APP_OFF
#define TARGET_ASM_FILE_START_APP_OFF true

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue

#undef TARGET_SCHED_INIT_GLOBAL
#define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global

#undef TARGET_SCHED_FINISH_GLOBAL
#define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global

#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT m68k_sched_md_init

#undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle

#undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
#define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  m68k_sched_first_cycle_multipass_dfa_lookahead

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE m68k_option_override

#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE m68k_override_options_after_change

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS m68k_rtx_costs

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE m68k_attribute_table

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM m68k_cannot_force_const_mem

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p

#if M68K_HONOR_TARGET_STRICT_ALIGNMENT
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
#endif

#ifdef HAVE_AS_TLS
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (true)

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
#endif

#undef TARGET_LRA_P
#define TARGET_LRA_P m68k_use_lra_p

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE m68k_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE m68k_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT m68k_trampoline_init

#undef TARGET_RETURN_POPS_ARGS
#define TARGET_RETURN_POPS_ARGS m68k_return_pops_args

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS m68k_delegitimize_address

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG m68k_function_arg

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE m68k_function_arg_advance

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P m68k_legitimate_constant_p

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA m68k_output_addr_const_extra

#undef TARGET_C_EXCESS_PRECISION
#define TARGET_C_EXCESS_PRECISION m68k_excess_precision

/* The value stored by TAS.  */
#undef TARGET_ATOMIC_TEST_AND_SET_TRUEVAL
#define TARGET_ATOMIC_TEST_AND_SET_TRUEVAL 128

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS m68k_hard_regno_nregs
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK m68k_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P m68k_modes_tieable_p

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE m68k_promote_function_mode

#undef TARGET_HAVE_SPECULATION_SAFE_VALUE
#define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed

#undef TARGET_ASM_FINAL_POSTSCAN_INSN
#define TARGET_ASM_FINAL_POSTSCAN_INSN m68k_asm_final_postscan_insn

#undef TARGET_ZERO_CALL_USED_REGS
#define TARGET_ZERO_CALL_USED_REGS m68k_zero_call_used_regs

#undef TARGET_C_MODE_FOR_FLOATING_TYPE
#define TARGET_C_MODE_FOR_FLOATING_TYPE m68k_c_mode_for_floating_type
373 TARGET_GNU_ATTRIBUTES (m68k_attribute_table,
375 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
376 affects_type_identity, handler, exclude } */
377 { "interrupt", 0, 0, true, false, false, false,
378 m68k_handle_fndecl_attribute, NULL },
379 { "interrupt_handler", 0, 0, true, false, false, false,
380 m68k_handle_fndecl_attribute, NULL },
381 { "interrupt_thread", 0, 0, true, false, false, false,
382 m68k_handle_fndecl_attribute, NULL }
385 struct gcc_target targetm = TARGET_INITIALIZER;
/* Base flags for 68k ISAs.  */
#define FL_FOR_isa_00    FL_ISA_68000
#define FL_FOR_isa_10    (FL_FOR_isa_00 | FL_ISA_68010)
/* FL_68881 controls the default setting of -m68881.  gcc has traditionally
   generated 68881 code for 68020 and 68030 targets unless explicitly told
   not to.  */
#define FL_FOR_isa_20    (FL_FOR_isa_10 | FL_ISA_68020 \
			  | FL_BITFIELD | FL_68881 | FL_CAS)
#define FL_FOR_isa_40    (FL_FOR_isa_20 | FL_ISA_68040)
#define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)

/* Base flags for ColdFire ISAs.  */
#define FL_FOR_isa_a     (FL_COLDFIRE | FL_ISA_A)
#define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
/* Note ISA_B doesn't necessarily include USP (user stack pointer) support.  */
#define FL_FOR_isa_b     (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
/* ISA_C is not upwardly compatible with ISA_B.  */
#define FL_FOR_isa_c     (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)

enum m68k_isa
{
  /* Traditional 68000 instruction sets.  */
  isa_00,
  isa_10,
  isa_20,
  isa_40,
  isa_cpu32,
  /* ColdFire instruction set variants.  */
  isa_a,
  isa_aplus,
  isa_b,
  isa_c,
  isa_max
};
422 /* Information about one of the -march, -mcpu or -mtune arguments. */
423 struct m68k_target_selection
425 /* The argument being described. */
426 const char *name;
428 /* For -mcpu, this is the device selected by the option.
429 For -mtune and -march, it is a representative device
430 for the microarchitecture or ISA respectively. */
431 enum target_device device;
433 /* The M68K_DEVICE fields associated with DEVICE. See the comment
434 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
435 const char *family;
436 enum uarch_type microarch;
437 enum m68k_isa isa;
438 unsigned long flags;
441 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
442 static const struct m68k_target_selection all_devices[] =
444 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
445 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
446 #include "m68k-devices.def"
447 #undef M68K_DEVICE
448 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
451 /* A list of all ISAs, mapping each one to a representative device.
452 Used for -march selection. */
453 static const struct m68k_target_selection all_isas[] =
455 #define M68K_ISA(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
456 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
457 #include "m68k-isas.def"
458 #undef M68K_ISA
459 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
462 /* A list of all microarchitectures, mapping each one to a representative
463 device. Used for -mtune selection. */
464 static const struct m68k_target_selection all_microarchs[] =
466 #define M68K_MICROARCH(NAME,DEVICE,MICROARCH,ISA,FLAGS) \
467 { NAME, DEVICE, NULL, u##MICROARCH, ISA, FLAGS },
468 #include "m68k-microarchs.def"
469 #undef M68K_MICROARCH
470 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
473 /* The entries associated with the -mcpu, -march and -mtune settings,
474 or null for options that have not been used. */
475 const struct m68k_target_selection *m68k_cpu_entry;
476 const struct m68k_target_selection *m68k_arch_entry;
477 const struct m68k_target_selection *m68k_tune_entry;
479 /* Which CPU we are generating code for. */
480 enum target_device m68k_cpu;
482 /* Which microarchitecture to tune for. */
483 enum uarch_type m68k_tune;
485 /* Which FPU to use. */
486 enum fpu_type m68k_fpu;
488 /* The set of FL_* flags that apply to the target processor. */
489 unsigned int m68k_cpu_flags;
491 /* The set of FL_* flags that apply to the processor to be tuned for. */
492 unsigned int m68k_tune_flags;
494 /* Asm templates for calling or jumping to an arbitrary symbolic address,
495 or NULL if such calls or jumps are not supported. The address is held
496 in operand 0. */
497 const char *m68k_symbolic_call;
498 const char *m68k_symbolic_jump;
500 /* Enum variable that corresponds to m68k_symbolic_call values. */
501 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
504 /* Implement TARGET_OPTION_OVERRIDE. */
506 static void
507 m68k_option_override (void)
509 const struct m68k_target_selection *entry;
510 unsigned long target_mask;
512 if (OPTION_SET_P (m68k_arch_option))
513 m68k_arch_entry = &all_isas[m68k_arch_option];
515 if (OPTION_SET_P (m68k_cpu_option))
516 m68k_cpu_entry = &all_devices[(int) m68k_cpu_option];
518 if (OPTION_SET_P (m68k_tune_option))
519 m68k_tune_entry = &all_microarchs[(int) m68k_tune_option];
521 /* User can choose:
523 -mcpu=
524 -march=
525 -mtune=
527 -march=ARCH should generate code that runs any processor
528 implementing architecture ARCH. -mcpu=CPU should override -march
529 and should generate code that runs on processor CPU, making free
530 use of any instructions that CPU understands. -mtune=UARCH applies
531 on top of -mcpu or -march and optimizes the code for UARCH. It does
532 not change the target architecture. */
533 if (m68k_cpu_entry)
535 /* Complain if the -march setting is for a different microarchitecture,
536 or includes flags that the -mcpu setting doesn't. */
537 if (m68k_arch_entry
538 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
539 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
540 warning (0, "%<-mcpu=%s%> conflicts with %<-march=%s%>",
541 m68k_cpu_entry->name, m68k_arch_entry->name);
543 entry = m68k_cpu_entry;
545 else
546 entry = m68k_arch_entry;
548 if (!entry)
549 entry = all_devices + TARGET_CPU_DEFAULT;
551 m68k_cpu_flags = entry->flags;
553 /* Use the architecture setting to derive default values for
554 certain flags. */
555 target_mask = 0;
557 /* ColdFire is lenient about alignment. */
558 if (!TARGET_COLDFIRE)
559 target_mask |= MASK_STRICT_ALIGNMENT;
561 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
562 target_mask |= MASK_BITFIELD;
563 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
564 target_mask |= MASK_CF_HWDIV;
565 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
566 target_mask |= MASK_HARD_FLOAT;
567 target_flags |= target_mask & ~target_flags_explicit;
569 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
570 m68k_cpu = entry->device;
571 if (m68k_tune_entry)
573 m68k_tune = m68k_tune_entry->microarch;
574 m68k_tune_flags = m68k_tune_entry->flags;
576 #ifdef M68K_DEFAULT_TUNE
577 else if (!m68k_cpu_entry && !m68k_arch_entry)
579 enum target_device dev;
580 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
581 m68k_tune_flags = all_devices[dev].flags;
583 #endif
584 else
586 m68k_tune = entry->microarch;
587 m68k_tune_flags = entry->flags;
590 /* Set the type of FPU. */
591 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
592 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
593 : FPUTYPE_68881);
595 /* Sanity check to ensure that msep-data and mid-sahred-library are not
596 * both specified together. Doing so simply doesn't make sense.
598 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
599 error ("cannot specify both %<-msep-data%> and %<-mid-shared-library%>");
601 /* If we're generating code for a separate A5 relative data segment,
602 * we've got to enable -fPIC as well. This might be relaxable to
603 * -fpic but it hasn't been tested properly.
605 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
606 flag_pic = 2;
608 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
609 error if the target does not support them. */
610 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
611 error ("%<-mpcrel%> %<-fPIC%> is not currently supported on selected cpu");
613 /* ??? A historic way of turning on pic, or is this intended to
614 be an embedded thing that doesn't have the same name binding
615 significance that it does on hosted ELF systems? */
616 if (TARGET_PCREL && flag_pic == 0)
617 flag_pic = 1;
619 if (!flag_pic)
621 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
623 m68k_symbolic_jump = "jra %a0";
625 else if (TARGET_ID_SHARED_LIBRARY)
626 /* All addresses must be loaded from the GOT. */
628 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
630 if (TARGET_PCREL)
631 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
632 else
633 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
635 if (TARGET_ISAC)
636 /* No unconditional long branch */;
637 else if (TARGET_PCREL)
638 m68k_symbolic_jump = "bra%.l %c0";
639 else
640 m68k_symbolic_jump = "bra%.l %p0";
641 /* Turn off function cse if we are doing PIC. We always want
642 function call to be done as `bsr foo@PLTPC'. */
643 /* ??? It's traditional to do this for -mpcrel too, but it isn't
644 clear how intentional that is. */
645 flag_no_function_cse = 1;
648 switch (m68k_symbolic_call_var)
650 case M68K_SYMBOLIC_CALL_JSR:
651 m68k_symbolic_call = "jsr %a0";
652 break;
654 case M68K_SYMBOLIC_CALL_BSR_C:
655 m68k_symbolic_call = "bsr%.l %c0";
656 break;
658 case M68K_SYMBOLIC_CALL_BSR_P:
659 m68k_symbolic_call = "bsr%.l %p0";
660 break;
662 case M68K_SYMBOLIC_CALL_NONE:
663 gcc_assert (m68k_symbolic_call == NULL);
664 break;
666 default:
667 gcc_unreachable ();
670 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
671 parse_alignment_opts ();
672 int label_alignment = align_labels.levels[0].get_value ();
673 if (label_alignment > 2)
675 warning (0, "%<-falign-labels=%d%> is not supported", label_alignment);
676 str_align_labels = "1";
679 int loop_alignment = align_loops.levels[0].get_value ();
680 if (loop_alignment > 2)
682 warning (0, "%<-falign-loops=%d%> is not supported", loop_alignment);
683 str_align_loops = "1";
685 #endif
687 if ((opt_fstack_limit_symbol_arg != NULL || opt_fstack_limit_register_no >= 0)
688 && !TARGET_68020)
690 warning (0, "%<-fstack-limit-%> options are not supported on this cpu");
691 opt_fstack_limit_symbol_arg = NULL;
692 opt_fstack_limit_register_no = -1;
695 SUBTARGET_OVERRIDE_OPTIONS;
697 /* Setup scheduling options. */
698 if (TUNE_CFV1)
699 m68k_sched_cpu = CPU_CFV1;
700 else if (TUNE_CFV2)
701 m68k_sched_cpu = CPU_CFV2;
702 else if (TUNE_CFV3)
703 m68k_sched_cpu = CPU_CFV3;
704 else if (TUNE_CFV4)
705 m68k_sched_cpu = CPU_CFV4;
706 else
708 m68k_sched_cpu = CPU_UNKNOWN;
709 flag_schedule_insns = 0;
710 flag_schedule_insns_after_reload = 0;
711 flag_modulo_sched = 0;
712 flag_live_range_shrinkage = 0;
715 if (m68k_sched_cpu != CPU_UNKNOWN)
717 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
718 m68k_sched_mac = MAC_CF_EMAC;
719 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
720 m68k_sched_mac = MAC_CF_MAC;
721 else
722 m68k_sched_mac = MAC_NO;
726 /* Implement TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE. */
728 static void
729 m68k_override_options_after_change (void)
731 if (m68k_sched_cpu == CPU_UNKNOWN)
733 flag_schedule_insns = 0;
734 flag_schedule_insns_after_reload = 0;
735 flag_modulo_sched = 0;
736 flag_live_range_shrinkage = 0;
740 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
741 given argument and NAME is the argument passed to -mcpu. Return NULL
742 if -mcpu was not passed. */
744 const char *
745 m68k_cpp_cpu_ident (const char *prefix)
747 if (!m68k_cpu_entry)
748 return NULL;
749 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
752 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
753 given argument and NAME is the name of the representative device for
754 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
756 const char *
757 m68k_cpp_cpu_family (const char *prefix)
759 if (!m68k_cpu_entry)
760 return NULL;
761 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
764 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
765 "interrupt_handler" attribute and interrupt_thread if FUNC has an
766 "interrupt_thread" attribute. Otherwise, return
767 m68k_fk_normal_function. */
769 enum m68k_function_kind
770 m68k_get_function_kind (tree func)
772 tree a;
774 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
776 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
777 if (a != NULL_TREE)
778 return m68k_fk_interrupt_handler;
780 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
781 if (a != NULL_TREE)
782 return m68k_fk_interrupt_handler;
784 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
785 if (a != NULL_TREE)
786 return m68k_fk_interrupt_thread;
788 return m68k_fk_normal_function;
791 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
792 struct attribute_spec.handler. */
793 static tree
794 m68k_handle_fndecl_attribute (tree *node, tree name,
795 tree args ATTRIBUTE_UNUSED,
796 int flags ATTRIBUTE_UNUSED,
797 bool *no_add_attrs)
799 if (TREE_CODE (*node) != FUNCTION_DECL)
801 warning (OPT_Wattributes, "%qE attribute only applies to functions",
802 name);
803 *no_add_attrs = true;
806 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
808 error ("multiple interrupt attributes not allowed");
809 *no_add_attrs = true;
812 if (!TARGET_FIDOA
813 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
815 error ("%<interrupt_thread%> is available only on fido");
816 *no_add_attrs = true;
819 return NULL_TREE;
822 static void
823 m68k_compute_frame_layout (void)
825 int regno, saved;
826 unsigned int mask;
827 enum m68k_function_kind func_kind =
828 m68k_get_function_kind (current_function_decl);
829 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
830 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
832 /* Only compute the frame once per function.
833 Don't cache information until reload has been completed. */
834 if (current_frame.funcdef_no == current_function_funcdef_no
835 && reload_completed)
836 return;
838 current_frame.size = (get_frame_size () + 3) & -4;
840 mask = saved = 0;
842 /* Interrupt thread does not need to save any register. */
843 if (!interrupt_thread)
844 for (regno = 0; regno < 16; regno++)
845 if (m68k_save_reg (regno, interrupt_handler))
847 mask |= 1 << (regno - D0_REG);
848 saved++;
850 current_frame.offset = saved * 4;
851 current_frame.reg_no = saved;
852 current_frame.reg_mask = mask;
854 current_frame.foffset = 0;
855 mask = saved = 0;
856 if (TARGET_HARD_FLOAT)
858 /* Interrupt thread does not need to save any register. */
859 if (!interrupt_thread)
860 for (regno = 16; regno < 24; regno++)
861 if (m68k_save_reg (regno, interrupt_handler))
863 mask |= 1 << (regno - FP0_REG);
864 saved++;
866 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
867 current_frame.offset += current_frame.foffset;
869 current_frame.fpu_no = saved;
870 current_frame.fpu_mask = mask;
872 /* Remember what function this frame refers to. */
873 current_frame.funcdef_no = current_function_funcdef_no;
876 /* Worker function for TARGET_CAN_ELIMINATE. */
878 bool
879 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
881 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
884 HOST_WIDE_INT
885 m68k_initial_elimination_offset (int from, int to)
887 int argptr_offset;
888 /* The arg pointer points 8 bytes before the start of the arguments,
889 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
890 frame pointer in most frames. */
891 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
892 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
893 return argptr_offset;
895 m68k_compute_frame_layout ();
897 gcc_assert (to == STACK_POINTER_REGNUM);
898 switch (from)
900 case ARG_POINTER_REGNUM:
901 return current_frame.offset + current_frame.size - argptr_offset;
902 case FRAME_POINTER_REGNUM:
903 return current_frame.offset + current_frame.size;
904 default:
905 gcc_unreachable ();
909 /* Refer to the array `regs_ever_live' to determine which registers
910 to save; `regs_ever_live[I]' is nonzero if register number I
911 is ever used in the function. This function is responsible for
912 knowing which registers should not be saved even if used.
913 Return true if we need to save REGNO. */
915 static bool
916 m68k_save_reg (unsigned int regno, bool interrupt_handler)
918 if (flag_pic && regno == PIC_REG)
920 if (crtl->saves_all_registers)
921 return true;
922 if (crtl->uses_pic_offset_table)
923 return true;
924 /* Reload may introduce constant pool references into a function
925 that thitherto didn't need a PIC register. Note that the test
926 above will not catch that case because we will only set
927 crtl->uses_pic_offset_table when emitting
928 the address reloads. */
929 if (crtl->uses_const_pool)
930 return true;
933 if (crtl->calls_eh_return)
935 unsigned int i;
936 for (i = 0; ; i++)
938 unsigned int test = EH_RETURN_DATA_REGNO (i);
939 if (test == INVALID_REGNUM)
940 break;
941 if (test == regno)
942 return true;
946 /* Fixed regs we never touch. */
947 if (fixed_regs[regno])
948 return false;
950 /* The frame pointer (if it is such) is handled specially. */
951 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
952 return false;
954 /* Interrupt handlers must also save call_used_regs
955 if they are live or when calling nested functions. */
956 if (interrupt_handler)
958 if (df_regs_ever_live_p (regno))
959 return true;
961 if (!crtl->is_leaf && call_used_or_fixed_reg_p (regno))
962 return true;
965 /* Never need to save registers that aren't touched. */
966 if (!df_regs_ever_live_p (regno))
967 return false;
969 /* Otherwise save everything that isn't call-clobbered. */
970 return !call_used_or_fixed_reg_p (regno);
/* Emit RTL for a MOVEM or FMOVEM instruction.  BASE + OFFSET represents
   the lowest memory address.  COUNT is the number of registers to be
   moved, with register REGNO + I being moved if bit I of MASK is set.
   STORE_P specifies the direction of the move and ADJUST_STACK_P says
   whether or not this is pre-decrement (if STORE_P) or post-increment
   (if !STORE_P) operation.

   Returns the emitted insn, whose pattern is a PARALLEL of one SET per
   moved register, preceded by an optional SET of BASE when the stack
   pointer adjustment is folded into the same insn.  */

static rtx_insn *
m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
		 unsigned int count, unsigned int regno,
		 unsigned int mask, bool store_p, bool adjust_stack_p)
{
  int i;
  rtx body, addr, src, operands[2];
  machine_mode mode;

  /* One vector slot per register, plus one for the base adjustment.  */
  body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
  /* All registers in MASK share the natural mode of the first one.  */
  mode = reg_raw_mode[regno];
  i = 0;

  if (adjust_stack_p)
    {
      /* Stores move BASE down by the block size, loads move it up.  */
      src = plus_constant (Pmode, base,
			   (count
			    * GET_MODE_SIZE (mode)
			    * (HOST_WIDE_INT) (store_p ? -1 : 1)));
      XVECEXP (body, 0, i++) = gen_rtx_SET (base, src);
    }

  for (; mask != 0; mask >>= 1, regno++)
    if (mask & 1)
      {
	addr = plus_constant (Pmode, base, offset);
	/* operands[0] is the destination; indexing by STORE_P picks
	   memory vs. register for each direction.  */
	operands[!store_p] = gen_frame_mem (mode, addr);
	operands[store_p] = gen_rtx_REG (mode, regno);
	XVECEXP (body, 0, i++)
	  = gen_rtx_SET (operands[0], operands[1]);
	offset += GET_MODE_SIZE (mode);
      }
  /* Every allocated PARALLEL slot must have been filled.  */
  gcc_assert (i == XVECLEN (body, 0));

  return emit_insn (body);
}
1017 /* Make INSN a frame-related instruction. */
1019 static void
1020 m68k_set_frame_related (rtx_insn *insn)
1022 rtx body;
1023 int i;
1025 RTX_FRAME_RELATED_P (insn) = 1;
1026 body = PATTERN (insn);
1027 if (GET_CODE (body) == PARALLEL)
1028 for (i = 0; i < XVECLEN (body, 0); i++)
1029 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
/* Emit RTL for the "prologue" define_expand.

   Order of emission: optional static stack-limit check, frame
   allocation (link/adds), FP register saves, optional dynamic
   stack-limit check, integer register saves, and finally the GOT
   load for PIC.  */

void
m68k_expand_prologue (void)
{
  HOST_WIDE_INT fsize_with_regs;
  rtx limit, src, dest;

  m68k_compute_frame_layout ();

  if (flag_stack_usage_info)
    current_function_static_stack_size
      = current_frame.size + current_frame.offset;

  /* If the stack limit is a symbol, we can check it here,
     before actually allocating the space.  */
  if (crtl->limit_stack
      && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
    {
      limit = plus_constant (Pmode, stack_limit_rtx, current_frame.size + 4);
      if (!m68k_legitimate_constant_p (Pmode, limit))
	{
	  /* Constant not directly usable; materialize it in %d0.  */
	  emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
	  limit = gen_rtx_REG (Pmode, D0_REG);
	}
      emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
					    stack_pointer_rtx, limit),
			       stack_pointer_rtx, limit,
			       const1_rtx));
    }

  fsize_with_regs = current_frame.size;
  if (TARGET_COLDFIRE)
    {
      /* ColdFire's move multiple instructions do not allow pre-decrement
	 addressing.  Add the size of movem saves to the initial stack
	 allocation instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  if (frame_pointer_needed)
    {
      if (fsize_with_regs == 0 && TUNE_68040)
	{
	  /* On the 68040, two separate moves are faster than link.w 0.  */
	  dest = gen_frame_mem (Pmode,
				gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
	  m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
						  stack_pointer_rtx));
	}
      else if (fsize_with_regs < 0x8000 || TARGET_68020)
	/* link.w takes a 16-bit displacement; 68020+ link.l takes 32.  */
	m68k_set_frame_related
	  (emit_insn (gen_link (frame_pointer_rtx,
				GEN_INT (-4 - fsize_with_regs))));
      else
	{
	  /* Frame too big for link.w on pre-68020: link with zero size,
	     then adjust %sp separately.  */
	  m68k_set_frame_related
	    (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
	  m68k_set_frame_related
	    (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-fsize_with_regs))));
	}

      /* If the frame pointer is needed, emit a special barrier that
	 will prevent the scheduler from moving stores to the frame
	 before the stack adjustment.  */
      emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
    }
  else if (fsize_with_regs != 0)
    m68k_set_frame_related
      (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-fsize_with_regs))));

  if (current_frame.fpu_mask)
    {
      gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
      if (TARGET_68881)
	/* 68881 fmovem supports pre-decrement; save below %sp.  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
			    current_frame.fpu_no, FP0_REG,
			    current_frame.fpu_mask, true, true));
      else
	{
	  int offset;

	  /* If we're using moveml to save the integer registers,
	     the stack pointer will point to the bottom of the moveml
	     save area.  Find the stack offset of the first FP register.  */
	  if (current_frame.reg_no < MIN_MOVEM_REGS)
	    offset = 0;
	  else
	    offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	  m68k_set_frame_related
	    (m68k_emit_movem (stack_pointer_rtx, offset,
			      current_frame.fpu_no, FP0_REG,
			      current_frame.fpu_mask, true, false));
	}
    }

  /* If the stack limit is not a symbol, check it here.
     This has the disadvantage that it may be too late...  */
  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx))
	emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
					      stack_limit_rtx),
				 stack_pointer_rtx, stack_limit_rtx,
				 const1_rtx));

      else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
	warning (0, "stack limit expression is not supported");
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Store each register separately in the same order moveml does.  */
      int i;

      for (i = 16; i-- > 0; )
	if (current_frame.reg_mask & (1 << i))
	  {
	    src = gen_rtx_REG (SImode, D0_REG + i);
	    dest = gen_frame_mem (SImode,
				  gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
	    m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
	  }
    }
  else
    {
      if (TARGET_COLDFIRE)
	/* The required register save space has already been allocated.
	   The first register should be stored at (%sp).  */
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx, 0,
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, false));
      else
	m68k_set_frame_related
	  (m68k_emit_movem (stack_pointer_rtx,
			    current_frame.reg_no * -GET_MODE_SIZE (SImode),
			    current_frame.reg_no, D0_REG,
			    current_frame.reg_mask, true, true));
    }

  if (!TARGET_SEP_DATA
      && crtl->uses_pic_offset_table)
    emit_insn (gen_load_got (pic_offset_table_rtx));
}
1188 /* Return true if a simple (return) instruction is sufficient for this
1189 instruction (i.e. if no epilogue is needed). */
1191 bool
1192 m68k_use_return_insn (void)
1194 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
1195 return false;
1197 m68k_compute_frame_layout ();
1198 return current_frame.offset == 0;
/* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
   SIBCALL_P says which.

   The function epilogue should not depend on the current stack pointer!
   It should use the frame pointer only, if there is a frame pointer.
   This is mandatory because of alloca; we also take advantage of it to
   omit stack adjustments before returning.  */

void
m68k_expand_epilogue (bool sibcall_p)
{
  HOST_WIDE_INT fsize, fsize_with_regs;
  bool big, restore_from_sp;

  m68k_compute_frame_layout ();

  fsize = current_frame.size;
  big = false;
  restore_from_sp = false;

  /* FIXME : crtl->is_leaf below is too strong.
     What we really need to know there is if there could be pending
     stack adjustment needed at that point.  */
  restore_from_sp = (!frame_pointer_needed
		     || (!cfun->calls_alloca && crtl->is_leaf));

  /* fsize_with_regs is the size we need to adjust the sp when
     popping the frame.  */
  fsize_with_regs = fsize;
  if (TARGET_COLDFIRE && restore_from_sp)
    {
      /* ColdFire's move multiple instructions do not allow post-increment
	 addressing.  Add the size of movem loads to the final deallocation
	 instead.  */
      if (current_frame.reg_no >= MIN_MOVEM_REGS)
	fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
      if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
	fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
    }

  /* Restore offsets do not fit a 16-bit displacement from the frame
     pointer; compute an alternative addressing strategy via %a1.  */
  if (current_frame.offset + fsize >= 0x8000
      && !restore_from_sp
      && (current_frame.reg_mask || current_frame.fpu_mask))
    {
      if (TARGET_COLDFIRE
	  && (current_frame.reg_no >= MIN_MOVEM_REGS
	      || current_frame.fpu_no >= MIN_FMOVEM_REGS))
	{
	  /* ColdFire's move multiple instructions do not support the
	     (d8,Ax,Xi) addressing mode, so we're as well using a normal
	     stack-based restore.  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
			  GEN_INT (-(current_frame.offset + fsize)));
	  emit_insn (gen_blockage ());
	  emit_insn (gen_addsi3 (stack_pointer_rtx,
				 gen_rtx_REG (Pmode, A1_REG),
				 frame_pointer_rtx));
	  restore_from_sp = true;
	}
      else
	{
	  /* Use %a1 as an index so later addresses are (%fp,%a1.l).  */
	  emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
	  fsize = 0;
	  big = true;
	}
    }

  if (current_frame.reg_no < MIN_MOVEM_REGS)
    {
      /* Restore each register separately in the same order moveml does.  */
      int i;
      HOST_WIDE_INT offset;

      offset = current_frame.offset + fsize;
      for (i = 0; i < 16; i++)
	if (current_frame.reg_mask & (1 << i))
	  {
	    rtx addr;

	    if (big)
	      {
		/* Generate the address -OFFSET(%fp,%a1.l).  */
		addr = gen_rtx_REG (Pmode, A1_REG);
		addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
		addr = plus_constant (Pmode, addr, -offset);
	      }
	    else if (restore_from_sp)
	      addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
	    else
	      addr = plus_constant (Pmode, frame_pointer_rtx, -offset);
	    emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
			    gen_frame_mem (SImode, addr));
	    offset -= GET_MODE_SIZE (SImode);
	  }
    }
  else if (current_frame.reg_mask)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
      else if (restore_from_sp)
	m68k_emit_movem (stack_pointer_rtx, 0,
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false,
			 !TARGET_COLDFIRE);
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.offset + fsize),
			 current_frame.reg_no, D0_REG,
			 current_frame.reg_mask, false, false);
    }

  if (current_frame.fpu_no > 0)
    {
      if (big)
	m68k_emit_movem (gen_rtx_PLUS (Pmode,
				       gen_rtx_REG (Pmode, A1_REG),
				       frame_pointer_rtx),
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
      else if (restore_from_sp)
	{
	  if (TARGET_COLDFIRE)
	    {
	      int offset;

	      /* If we used moveml to restore the integer registers, the
		 stack pointer will still point to the bottom of the moveml
		 save area.  Find the stack offset of the first FP
		 register.  */
	      if (current_frame.reg_no < MIN_MOVEM_REGS)
		offset = 0;
	      else
		offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
	      m68k_emit_movem (stack_pointer_rtx, offset,
			       current_frame.fpu_no, FP0_REG,
			       current_frame.fpu_mask, false, false);
	    }
	  else
	    m68k_emit_movem (stack_pointer_rtx, 0,
			     current_frame.fpu_no, FP0_REG,
			     current_frame.fpu_mask, false, true);
	}
      else
	m68k_emit_movem (frame_pointer_rtx,
			 -(current_frame.foffset + fsize),
			 current_frame.fpu_no, FP0_REG,
			 current_frame.fpu_mask, false, false);
    }

  /* Keep the scheduler from moving the restores past the unlink.  */
  emit_insn (gen_blockage ());
  if (frame_pointer_needed)
    emit_insn (gen_unlink (frame_pointer_rtx));
  else if (fsize_with_regs)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   GEN_INT (fsize_with_regs)));

  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   EH_RETURN_STACKADJ_RTX));

  if (!sibcall_p)
    emit_jump_insn (ret_rtx);
}
1373 /* Return true if PARALLEL contains register REGNO. */
1374 static bool
1375 m68k_reg_present_p (const_rtx parallel, unsigned int regno)
1377 int i;
1379 if (REG_P (parallel) && REGNO (parallel) == regno)
1380 return true;
1382 if (GET_CODE (parallel) != PARALLEL)
1383 return false;
1385 for (i = 0; i < XVECLEN (parallel, 0); ++i)
1387 const_rtx x;
1389 x = XEXP (XVECEXP (parallel, 0, i), 0);
1390 if (REG_P (x) && REGNO (x) == regno)
1391 return true;
1394 return false;
/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P.  DECL is the called
   function's declaration (NULL for indirect calls); EXP is the
   CALL_EXPR.  */

static bool
m68k_ok_for_sibcall_p (tree decl, tree exp)
{
  enum m68k_function_kind kind;

  /* We cannot use sibcalls for nested functions because we use the
     static chain register for indirect calls.  */
  if (CALL_EXPR_STATIC_CHAIN (exp))
    return false;

  if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
    {
      /* Check that the return value locations are the same.  For
	 example that we aren't returning a value from the sibling in
	 a D0 register but then need to transfer it to a A0 register.  */
      rtx cfun_value;
      rtx call_value;

      cfun_value = FUNCTION_VALUE (TREE_TYPE (DECL_RESULT (cfun->decl)),
				   cfun->decl);
      call_value = FUNCTION_VALUE (TREE_TYPE (exp), decl);

      /* Check that the values are equal or that the result the callee
	 function returns is superset of what the current function returns.  */
      if (!(rtx_equal_p (cfun_value, call_value)
	    || (REG_P (cfun_value)
		&& m68k_reg_present_p (call_value, REGNO (cfun_value)))))
	return false;
    }

  kind = m68k_get_function_kind (current_function_decl);
  if (kind == m68k_fk_normal_function)
    /* We can always sibcall from a normal function, because it's
       undefined if it is calling an interrupt function.  */
    return true;

  /* Otherwise we can only sibcall if the function kind is known to be
     the same.  */
  if (decl && m68k_get_function_kind (decl) == kind)
    return true;

  return false;
}
/* Implement TARGET_FUNCTION_ARG.  On the m68k all args are always
   pushed on the stack, so no argument is ever passed in a register
   and we unconditionally return NULL_RTX.  */

static rtx
m68k_function_arg (cumulative_args_t, const function_arg_info &)
{
  return NULL_RTX;
}
1451 static void
1452 m68k_function_arg_advance (cumulative_args_t cum_v,
1453 const function_arg_info &arg)
1455 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
1457 *cum += (arg.promoted_size_in_bytes () + 3) & ~3;
1460 /* Convert X to a legitimate function call memory reference and return the
1461 result. */
1464 m68k_legitimize_call_address (rtx x)
1466 gcc_assert (MEM_P (x));
1467 if (call_operand (XEXP (x, 0), VOIDmode))
1468 return x;
1469 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1472 /* Likewise for sibling calls. */
1475 m68k_legitimize_sibcall_address (rtx x)
1477 gcc_assert (MEM_P (x));
1478 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1479 return x;
1481 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1482 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
/* Convert X to a legitimate address and return it if successful.  Otherwise
   return X.

   For the 68000, we handle X+REG by loading X into a register R and
   using R+REG.  R will go in an address reg and indexing will be used.
   However, if REG is a broken-out memory address or multiplication,
   nothing needs to be done because REG can certainly go in an address reg.  */

static rtx
m68k_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  if (m68k_tls_symbol_p (x))
    return m68k_legitimize_tls_address (x);

  if (GET_CODE (x) == PLUS)
    {
      /* CH tracks whether X differs from what the caller handed us;
	 COPIED ensures we copy X at most once before mutating it.  */
      int ch = (x) != (oldx);
      int copied = 0;

#define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }

      if (GET_CODE (XEXP (x, 0)) == MULT
	  || GET_CODE (XEXP (x, 0)) == ASHIFT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
	}
      if (GET_CODE (XEXP (x, 1)) == MULT
	  || GET_CODE (XEXP (x, 1)) == ASHIFT)
	{
	  COPY_ONCE (x);
	  XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
	}
      if (ch)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG
	      && GET_CODE (XEXP (x, 0)) == REG)
	    {
	      if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
		{
		  /* ColdFire FPU addressing cannot use reg+reg.  */
		  COPY_ONCE (x);
		  x = force_operand (x, 0);
		}
	      return x;
	    }
	  if (memory_address_p (mode, x))
	    return x;
	}

      /* One operand is (or sign-extends to) a register: force the
	 other into a fresh register and use reg+reg indexing.  */
      if (GET_CODE (XEXP (x, 0)) == REG
	  || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	      && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 1), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 1) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 0)) == REG)
	    x = force_operand (x, 0);
	}
      else if (GET_CODE (XEXP (x, 1)) == REG
	       || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
		   && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
		   && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
	{
	  rtx temp = gen_reg_rtx (Pmode);
	  rtx val = force_operand (XEXP (x, 0), 0);
	  emit_move_insn (temp, val);
	  COPY_ONCE (x);
	  XEXP (x, 0) = temp;
	  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
	      && GET_CODE (XEXP (x, 1)) == REG)
	    x = force_operand (x, 0);
	}
    }

  return x;
}
/* For eliding comparisons, we remember how the flags were set.
   FLAGS_COMPARE_OP0 and FLAGS_COMPARE_OP1 are remembered for a direct
   comparison, they take priority.  FLAGS_OPERAND1 and FLAGS_OPERAND2
   are used in more cases, they are a fallback for comparisons against
   zero after a move or arithmetic insn.
   FLAGS_VALID is set to FLAGS_VALID_NO if we should not use any of
   these values.  */

/* The two operands of the most recent explicit comparison, if any.  */
static rtx flags_compare_op0, flags_compare_op1;
/* Operands whose comparison against zero is implied by the flags.  */
static rtx flags_operand1, flags_operand2;
/* How (and whether) the remembered flags may be used; reset at labels.  */
static attr_flags_valid flags_valid = FLAGS_VALID_NO;
/* Return a code other than UNKNOWN if we can elide a CODE comparison of
   OP0 with OP1, i.e. if the flags already reflect that comparison.
   The returned code may differ from CODE: PLUS/MINUS encode sign tests
   (N flag clear/set) used by the branch patterns.  */

rtx_code
m68k_find_flags_value (rtx op0, rtx op1, rtx_code code)
{
  /* A remembered explicit comparison takes priority.  */
  if (flags_compare_op0 != NULL_RTX)
    {
      if (rtx_equal_p (op0, flags_compare_op0)
	  && rtx_equal_p (op1, flags_compare_op1))
	return code;
      if (rtx_equal_p (op0, flags_compare_op1)
	  && rtx_equal_p (op1, flags_compare_op0))
	return swap_condition (code);
      return UNKNOWN;
    }

  /* The fallback operands only help for comparisons against zero.  */
  machine_mode mode = GET_MODE (op0);
  if (op1 != CONST0_RTX (mode))
    return UNKNOWN;
  /* Comparisons against 0 with these two should have been optimized out.  */
  gcc_assert (code != LTU && code != GEU);
  /* GT/LE need a valid overflow flag, which NOOV insns do not provide.  */
  if (flags_valid == FLAGS_VALID_NOOV && (code == GT || code == LE))
    return UNKNOWN;
  if (rtx_equal_p (flags_operand1, op0) || rtx_equal_p (flags_operand2, op0))
    return (FLOAT_MODE_P (mode) ? code
	    : code == GE ? PLUS : code == LT ? MINUS : code);
  /* See if we are testing whether the high part of a DImode value is
     positive or negative and we have the full value as a remembered
     operand.  */
  if (code != GE && code != LT)
    return UNKNOWN;
  if (mode == SImode
      && flags_operand1 != NULL_RTX && GET_MODE (flags_operand1) == DImode
      && REG_P (flags_operand1) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand1), DImode) == 2
      && REGNO (flags_operand1) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  if (mode == SImode
      && flags_operand2 != NULL_RTX && GET_MODE (flags_operand2) == DImode
      && REG_P (flags_operand2) && REG_P (op0)
      && hard_regno_nregs (REGNO (flags_operand2), DImode) == 2
      && REGNO (flags_operand2) == REGNO (op0))
    return code == GE ? PLUS : MINUS;
  return UNKNOWN;
}
1625 /* Called through CC_STATUS_INIT, which is invoked by final whenever a
1626 label is encountered. */
1628 void
1629 m68k_init_cc ()
1631 flags_compare_op0 = flags_compare_op1 = NULL_RTX;
1632 flags_operand1 = flags_operand2 = NULL_RTX;
1633 flags_valid = FLAGS_VALID_NO;
/* Update flags for a move operation with OPERANDS.  Called for move
   operations where attr_flags_valid returns "set".  */

static void
handle_flags_for_move (rtx *operands)
{
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;
  /* Moves into address registers do not set the flags.  */
  if (!ADDRESS_REG_P (operands[0]))
    {
      flags_valid = FLAGS_VALID_MOVE;
      flags_operand1 = side_effects_p (operands[0]) ? NULL_RTX : operands[0];
      if (side_effects_p (operands[1])
	  /* ??? For mem->mem moves, this can discard the source as a
	     valid compare operand.  If you assume aligned moves, this
	     is unnecessary, but in theory, we could have an unaligned
	     move overwriting parts of its source.  */
	  || modified_in_p (operands[1], current_output_insn))
	flags_operand2 = NULL_RTX;
      else
	flags_operand2 = operands[1];
      return;
    }

  /* Address-register move: the flags themselves are unchanged, but any
     remembered operand the insn overwrites is no longer usable.  */
  if (flags_operand1 != NULL_RTX
      && modified_in_p (flags_operand1, current_output_insn))
    flags_operand1 = NULL_RTX;
  if (flags_operand2 != NULL_RTX
      && modified_in_p (flags_operand2, current_output_insn))
    flags_operand2 = NULL_RTX;
}
/* Process INSN to remember flag operands if possible.  Runs after each
   insn is output, keeping the flags_* state in sync with the emitted
   assembly according to the insn's flags_valid attribute.  */

static void
m68k_asm_final_postscan_insn (FILE *, rtx_insn *insn, rtx [], int)
{
  enum attr_flags_valid v = get_attr_flags_valid (insn);
  /* FLAGS_VALID_SET insns (comparisons) manage the state themselves.  */
  if (v == FLAGS_VALID_SET)
    return;
  /* Comparisons use FLAGS_VALID_SET, so we can be sure we need to clear these
     now.  */
  flags_compare_op0 = flags_compare_op1 = NULL_RTX;

  if (v == FLAGS_VALID_NO)
    {
      flags_operand1 = flags_operand2 = NULL_RTX;
      return;
    }
  else if (v == FLAGS_VALID_UNCHANGED)
    {
      /* Flags survive, but operands the insn clobbers do not.  */
      if (flags_operand1 != NULL_RTX && modified_in_p (flags_operand1, insn))
	flags_operand1 = NULL_RTX;
      if (flags_operand2 != NULL_RTX && modified_in_p (flags_operand2, insn))
	flags_operand2 = NULL_RTX;
      return;
    }

  flags_valid = v;
  rtx set = single_set (insn);
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);
  if (side_effects_p (dest))
    dest = NULL_RTX;

  switch (v)
    {
    case FLAGS_VALID_YES:
    case FLAGS_VALID_NOOV:
      flags_operand1 = dest;
      flags_operand2 = NULL_RTX;
      break;
    case FLAGS_VALID_MOVE:
      /* fmoves to memory or data registers do not set the condition
	 codes.  Normal moves _do_ set the condition codes, but not in
	 a way that is appropriate for comparison with 0, because -0.0
	 would be treated as a negative nonzero number.  Note that it
	 isn't appropriate to conditionalize this restriction on
	 HONOR_SIGNED_ZEROS because that macro merely indicates whether
	 we care about the difference between -0.0 and +0.0.  */
      if (dest != NULL_RTX
	  && !FP_REG_P (dest)
	  && (FP_REG_P (src)
	      || GET_CODE (src) == FIX
	      || FLOAT_MODE_P (GET_MODE (dest))))
	flags_operand1 = flags_operand2 = NULL_RTX;
      else
	{
	  flags_operand1 = dest;
	  if (GET_MODE (src) != VOIDmode && !side_effects_p (src)
	      && !modified_in_p (src, insn))
	    flags_operand2 = src;
	  else
	    flags_operand2 = NULL_RTX;
	}
      break;
    default:
      gcc_unreachable ();
    }
  return;
}
/* Output a dbCC; jCC sequence.  Note we do not handle the
   floating point version of this sequence (Fdbcc).
   OPERANDS are as in the two peepholes.  CODE is the code
   returned by m68k_output_branch_<mode>; PLUS/MINUS encode
   sign tests (dbpl/dbmi).  */

void
output_dbcc_and_branch (rtx *operands, rtx_code code)
{
  switch (code)
    {
    case EQ:
      output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
      break;

    case NE:
      output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
      break;

    case GT:
      output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
      break;

    case GTU:
      output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
      break;

    case LT:
      output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
      break;

    case LTU:
      output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
      break;

    case GE:
      output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
      break;

    case GEU:
      output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
      break;

    case LE:
      output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
      break;

    case LEU:
      output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
      break;

    case PLUS:
      output_asm_insn ("dbpl %0,%l1\n\tjle %l2", operands);
      break;

    case MINUS:
      output_asm_insn ("dbmi %0,%l1\n\tjle %l2", operands);
      break;

    default:
      gcc_unreachable ();
    }

  /* If the decrement is to be done in SImode, then we have
     to compensate for the fact that dbcc decrements in HImode.  */
  switch (GET_MODE (operands[0]))
    {
    case E_SImode:
      output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
      break;

    case E_HImode:
      break;

    default:
      gcc_unreachable ();
    }
}
/* Output assembly to set DEST from the DImode comparison OP of
   OPERAND1 with OPERAND2, using a pair of SImode compares/tests and an
   sCC instruction.  Returns "" since all output is done via
   output_asm_insn.  */

const char *
output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
{
  rtx loperands[7];
  enum rtx_code op_code = GET_CODE (op);

  /* This does not produce a useful cc.  */
  CC_STATUS_INIT;

  /* The m68k cmp.l instruction requires operand1 to be a reg as used
     below.  Swap the operands and change the op if these requirements
     are not fulfilled.  */
  if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
    {
      rtx tmp = operand1;

      operand1 = operand2;
      operand2 = tmp;
      op_code = swap_condition (op_code);
    }
  /* %0/%1 are the high/low words of OPERAND1.  */
  loperands[0] = operand1;
  if (GET_CODE (operand1) == REG)
    loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
  else
    loperands[1] = adjust_address (operand1, SImode, 4);
  if (operand2 != const0_rtx)
    {
      /* %2/%3 are the high/low words of OPERAND2.  */
      loperands[2] = operand2;
      if (GET_CODE (operand2) == REG)
	loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
      else
	loperands[3] = adjust_address (operand2, SImode, 4);
    }
  /* %l4 labels the point where the low-word test decided the result.  */
  loperands[4] = gen_label_rtx ();
  if (operand2 != const0_rtx)
    output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
  else
    {
      /* tst.l of an address register only exists on 68020+/ColdFire.  */
      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
	output_asm_insn ("tst%.l %0", loperands);
      else
	output_asm_insn ("cmp%.w #0,%0", loperands);

      output_asm_insn ("jne %l4", loperands);

      if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
	output_asm_insn ("tst%.l %1", loperands);
      else
	output_asm_insn ("cmp%.w #0,%1", loperands);
    }

  loperands[5] = dest;

  switch (op_code)
    {
    case EQ:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("seq %5", loperands);
      break;

    case NE:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sne %5", loperands);
      break;

    case GT:
      /* Signed codes need an unsigned sCC after the low-word compare
	 and the signed sCC after the high-word compare; %l6 skips the
	 latter.  */
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("shi %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sgt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("shi %5", loperands);
      break;

    case LT:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scs %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("slt %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LTU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scs %5", loperands);
      break;

    case GE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("scc %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sge %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case GEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("scc %5", loperands);
      break;

    case LE:
      loperands[6] = gen_label_rtx ();
      output_asm_insn ("sls %5\n\tjra %l6", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sle %5", loperands);
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[6]));
      break;

    case LEU:
      (*targetm.asm_out.internal_label) (asm_out_file, "L",
					 CODE_LABEL_NUMBER (loperands[4]));
      output_asm_insn ("sls %5", loperands);
      break;

    default:
      gcc_unreachable ();
    }
  return "";
}
/* Output a bit-test of bit COUNTOP of DATAOP, to be followed by a
   branch on CODE.  SIGNPOS is the bit position of the sign bit of the
   storage unit being tested (31 for a register, 7 for a byte memory
   operand).  Returns the condition code the following branch should
   actually use; PLUS/MINUS encode sign tests.  */

rtx_code
m68k_output_btst (rtx countop, rtx dataop, rtx_code code, int signpos)
{
  rtx ops[2];
  ops[0] = countop;
  ops[1] = dataop;

  if (GET_CODE (countop) == CONST_INT)
    {
      int count = INTVAL (countop);
      /* If COUNT is bigger than size of storage unit in use,
	 advance to the containing unit of same size.  */
      if (count > signpos)
	{
	  int offset = (count & ~signpos) / 8;
	  count = count & signpos;
	  ops[1] = dataop = adjust_address (dataop, QImode, offset);
	}

      if (code == EQ || code == NE)
	{
	  /* Testing the sign bit can be done with a plain tst.  */
	  if (count == 31)
	    {
	      output_asm_insn ("tst%.l %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 15)
	    {
	      output_asm_insn ("tst%.w %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 7)
	    {
	      output_asm_insn ("tst%.b %1", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	}
      /* Try to use `movew to ccr' followed by the appropriate branch insn.
	 On some m68k variants unfortunately that's slower than btst.
	 On 68000 and higher, that should also work for all HImode operands.  */
      if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
	{
	  if (count == 3 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      /* Bit 3 lands in the N flag.  */
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? PLUS : MINUS;
	    }
	  if (count == 2 && DATA_REG_P (ops[1]) && (code == EQ || code == NE))
	    {
	      /* Bit 2 lands in the Z flag, inverting the sense.  */
	      output_asm_insn ("move%.w %1,%%ccr", ops);
	      return code == EQ ? NE : EQ;
	    }
	  /* count == 1 followed by bvc/bvs and
	     count == 0 followed by bcc/bcs are also possible, but need
	     m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags.  */
	}
    }
  output_asm_insn ("btst %0,%1", ops);
  return code;
}
/* Output a bftst instruction for a zero_extract with ZXOP0, ZXOP1 and ZXOP2
   operands.  CODE is the code of the comparison, and we return the code to
   be actually used in the jump.  */

rtx_code
m68k_output_bftst (rtx zxop0, rtx zxop1, rtx zxop2, rtx_code code)
{
  /* A one-bit field test degenerates to btst, with the bit number
     counted from the other end of the storage unit.  */
  if (zxop1 == const1_rtx && GET_CODE (zxop2) == CONST_INT)
    {
      int width = GET_CODE (zxop0) == REG ? 31 : 7;
      /* Pass 1000 as SIGNPOS argument so that btst will
	 not think we are testing the sign bit for an `and'
	 and assume that nonzero implies a negative result.  */
      return m68k_output_btst (GEN_INT (width - INTVAL (zxop2)), zxop0, code, 1000);
    }
  rtx ops[3] = { zxop0, zxop1, zxop2 };
  output_asm_insn ("bftst %0{%b2:%b1}", ops);
  return code;
}
2032 /* Return true if X is a legitimate base register. STRICT_P says
2033 whether we need strict checking. */
2035 bool
2036 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
2038 /* Allow SUBREG everywhere we allow REG. This results in better code. */
2039 if (!strict_p && GET_CODE (x) == SUBREG)
2040 x = SUBREG_REG (x);
2042 return (REG_P (x)
2043 && (strict_p
2044 ? REGNO_OK_FOR_BASE_P (REGNO (x))
2045 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
2048 /* Return true if X is a legitimate index register. STRICT_P says
2049 whether we need strict checking. */
2051 bool
2052 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
2054 if (!strict_p && GET_CODE (x) == SUBREG)
2055 x = SUBREG_REG (x);
2057 return (REG_P (x)
2058 && (strict_p
2059 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
2060 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
/* Return true if X is a legitimate index expression for a (d8,An,Xn) or
   (bd,An,Xn) addressing mode.  Fill in the INDEX and SCALE fields of
   ADDRESS if so.  STRICT_P says whether we need strict checking.  */

static bool
m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
{
  int scale;

  /* Check for a scale factor.  */
  scale = 1;
  if (TARGET_68020 || TARGET_COLDFIRE)
    {
      if (GET_CODE (x) == MULT
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 2
	      || INTVAL (XEXP (x, 1)) == 4
	      || (INTVAL (XEXP (x, 1)) == 8
		  /* Scale 8 requires an FPU on ColdFire.  */
		  && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
	{
	  scale = INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
      /* LRA uses ASHIFT instead of MULT outside of MEM.  */
      else if (GET_CODE (x) == ASHIFT
	       && GET_CODE (XEXP (x, 1)) == CONST_INT
	       && (INTVAL (XEXP (x, 1)) == 1
		   || INTVAL (XEXP (x, 1)) == 2
		   || (INTVAL (XEXP (x, 1)) == 3
		       && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
	{
	  scale = 1 << INTVAL (XEXP (x, 1));
	  x = XEXP (x, 0);
	}
    }

  /* Check for a word extension.  */
  if (!TARGET_COLDFIRE
      && GET_CODE (x) == SIGN_EXTEND
      && GET_MODE (XEXP (x, 0)) == HImode)
    x = XEXP (x, 0);

  if (m68k_legitimate_index_reg_p (x, strict_p))
    {
      address->scale = scale;
      address->index = x;
      return true;
    }

  return false;
}
/* Return true if X is an illegitimate symbolic constant.

   This is the case when symbol+offset arithmetic would escape the
   symbol's section, or when X contains an illegitimate TLS reference.  */

bool
m68k_illegitimate_symbolic_constant_p (rtx x)
{
  rtx base, offset;

  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
    {
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset)))
	return true;
    }
  return m68k_tls_reference_p (x, false);
}
2132 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2134 static bool
2135 m68k_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2137 return m68k_illegitimate_symbolic_constant_p (x);
/* Return true if X is a legitimate constant address that can reach
   bytes in the range [X, X + REACH).  STRICT_P says whether we need
   strict checking.  */

static bool
m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
{
  rtx base, offset;

  if (!CONSTANT_ADDRESS_P (x))
    return false;

  /* Symbolic addresses are not directly addressable under PIC, except
     in the strict PC-relative case.  */
  if (flag_pic
      && !(strict_p && TARGET_PCREL)
      && symbolic_operand (x, VOIDmode))
    return false;

  /* A multi-byte access must stay inside the symbol's section, so check
     the last byte of the access as well.  */
  if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
    {
      split_const (x, &base, &offset);
      if (GET_CODE (base) == SYMBOL_REF
	  && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
	return false;
    }

  return !m68k_tls_reference_p (x, false);
}
/* Return true if X is a LABEL_REF for a jump table.  Assume that unplaced
   labels will become jump tables.  */

static bool
m68k_jump_table_ref_p (rtx x)
{
  if (GET_CODE (x) != LABEL_REF)
    return false;

  /* An unplaced label (no neighbors in the insn chain) is assumed to be
     a future jump table; see the ??? note at the caller.  */
  rtx_insn *insn = as_a <rtx_insn *> (XEXP (x, 0));
  if (!NEXT_INSN (insn) && !PREV_INSN (insn))
    return true;

  insn = next_nonnote_insn (insn);
  return insn && JUMP_TABLE_DATA_P (insn);
}
/* Return true if X is a legitimate address for values of mode MODE.
   STRICT_P says whether strict checking is needed.  If the address
   is valid, describe its components in *ADDRESS.

   The checks are ordered from the simplest addressing modes to the most
   complex (d8,An,Xn)/(bd,An,Xn) forms; the order matters because X is
   progressively stripped as components are recognized.  */

static bool
m68k_decompose_address (machine_mode mode, rtx x,
			bool strict_p, struct m68k_address *address)
{
  unsigned int reach;

  memset (address, 0, sizeof (*address));

  /* REACH is the size of the access; BLKmode accesses are treated as
     one byte for range checks.  */
  if (mode == BLKmode)
    reach = 1;
  else
    reach = GET_MODE_SIZE (mode);

  /* Check for (An) (mode 2).  */
  if (m68k_legitimate_base_reg_p (x, strict_p))
    {
      address->base = x;
      return true;
    }

  /* Check for -(An) and (An)+ (modes 3 and 4).  */
  if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->code = GET_CODE (x);
      address->base = XEXP (x, 0);
      return true;
    }

  /* Check for (d16,An) (mode 5).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
      && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
    {
      address->base = XEXP (x, 0);
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Check for GOT loads.  These are (bd,An,Xn) addresses if
     TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
     addresses.  */
  if (GET_CODE (x) == PLUS
      && XEXP (x, 0) == pic_offset_table_rtx)
    {
      /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
	 they are invalid in this context.  */
      if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
	{
	  address->base = XEXP (x, 0);
	  address->offset = XEXP (x, 1);
	  return true;
	}
    }

  /* The ColdFire FPU only accepts addressing modes 2-5.  */
  if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
    return false;

  /* Check for (xxx).w and (xxx).l.  Also, in the TARGET_PCREL case,
     check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
     All these modes are variations of mode 7.  */
  if (m68k_legitimate_constant_address_p (x, reach, strict_p))
    {
      address->offset = x;
      return true;
    }

  /* Check for (d8,PC,Xn), a mode 7 form.  This case is needed for
     tablejumps.

     ??? do_tablejump creates these addresses before placing the target
     label, so we have to assume that unplaced labels are jump table
     references.  It seems unlikely that we would ever generate indexed
     accesses to unplaced labels in other cases.  Do not accept it in
     PIC mode, since the label address will need to be loaded from memory.  */
  if (GET_CODE (x) == PLUS
      && !flag_pic
      && m68k_jump_table_ref_p (XEXP (x, 1))
      && m68k_decompose_index (XEXP (x, 0), strict_p, address))
    {
      address->offset = XEXP (x, 1);
      return true;
    }

  /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
     (bd,An,Xn.SIZE*SCALE) addresses.  */

  if (TARGET_68020)
    {
      /* Check for a nonzero base displacement.  */
      if (GET_CODE (x) == PLUS
	  && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}

      /* Check for a suppressed index register.  */
      if (m68k_legitimate_base_reg_p (x, strict_p))
	{
	  address->base = x;
	  return true;
	}

      /* Check for a suppressed base register.  Do not allow this case
	 for non-symbolic offsets as it effectively gives gcc freedom
	 to treat data registers as base registers, which can generate
	 worse code.  */
      if (address->offset
	  && symbolic_operand (address->offset, VOIDmode)
	  && m68k_decompose_index (x, strict_p, address))
	return true;
    }
  else
    {
      /* Check for a nonzero base displacement.  Pre-68020 only has the
	 8-bit displacement form.  */
      if (GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach))
	{
	  address->offset = XEXP (x, 1);
	  x = XEXP (x, 0);
	}
    }

  /* We now expect the sum of a base and an index; try both operand
     orders, since PLUS operands are not canonically ordered here.  */
  if (GET_CODE (x) == PLUS)
    {
      if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
	  && m68k_decompose_index (XEXP (x, 1), strict_p, address))
	{
	  address->base = XEXP (x, 0);
	  return true;
	}

      if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
	  && m68k_decompose_index (XEXP (x, 0), strict_p, address))
	{
	  address->base = XEXP (x, 1);
	  return true;
	}
    }

  return false;
}
2336 /* Return true if X is a legitimate address for values of mode MODE.
2337 STRICT_P says whether strict checking is needed. */
2339 bool
2340 m68k_legitimate_address_p (machine_mode mode, rtx x, bool strict_p, code_helper)
2342 struct m68k_address address;
2344 return m68k_decompose_address (mode, x, strict_p, &address);
/* Return true if X is a memory, describing its address in ADDRESS if so.
   Apply strict checking if called during or after reload.  */

static bool
m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
{
  /* Strict checking kicks in once register allocation has started, so
     that only hard-register bases/indexes are accepted.  */
  return (MEM_P (x)
	  && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
				     (reload_in_progress || lra_in_progress
				      || reload_completed),
				     address));
}
2360 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
2362 bool
2363 m68k_legitimate_constant_p (machine_mode mode, rtx x)
2365 return mode != XFmode && !m68k_illegitimate_symbolic_constant_p (x);
2368 /* Return true if X matches the 'Q' constraint. It must be a memory
2369 with a base address and no constant offset or index. */
2371 bool
2372 m68k_matches_q_p (rtx x)
2374 struct m68k_address address;
2376 return (m68k_legitimate_mem_p (x, &address)
2377 && address.code == UNKNOWN
2378 && address.base
2379 && !address.offset
2380 && !address.index);
2383 /* Return true if X matches the 'U' constraint. It must be a base address
2384 with a constant offset and no index. */
2386 bool
2387 m68k_matches_u_p (rtx x)
2389 struct m68k_address address;
2391 return (m68k_legitimate_mem_p (x, &address)
2392 && address.code == UNKNOWN
2393 && address.base
2394 && address.offset
2395 && !address.index);
/* Return GOT pointer.

   Lazily creates the PIC register rtx and marks the current function as
   using the GOT pointer.  */

static rtx
m68k_get_gp (void)
{
  if (pic_offset_table_rtx == NULL_RTX)
    pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}
/* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
   wrappers.  */
enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
		  RELOC_TLSIE, RELOC_TLSLE };

/* True if RELOC is any of the TLS relocation kinds (i.e. not GOT).  */
#define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
/* Wrap symbol X into unspec representing relocation RELOC.
   BASE_REG - register that should be added to the result.
   TEMP_REG - if non-null, temporary register.

   Returns either a pseudo/TEMP_REG holding the computed address (the
   -mxgot/-mxtls ColdFire path, which also emits insns) or a PLUS
   expression of BASE_REG and the wrapped symbol.  */

static rtx
m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
{
  bool use_x_p;

  /* Pick the "extended" variant depending on whether this is a GOT or
     a TLS access.  */
  use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;

  if (TARGET_COLDFIRE && use_x_p)
    /* When compiling with -mx{got, tls} switch the code will look like this:

       move.l <X>@<RELOC>,<TEMP_REG>
       add.l <BASE_REG>,<TEMP_REG>  */
    {
      /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
	 to put @RELOC after reference.  */
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC32);
      x = gen_rtx_CONST (Pmode, x);

      if (temp_reg == NULL)
	{
	  gcc_assert (can_create_pseudo_p ());
	  temp_reg = gen_reg_rtx (Pmode);
	}

      emit_move_insn (temp_reg, x);
      emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
      x = temp_reg;
    }
  else
    {
      x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
			  UNSPEC_RELOC16);
      x = gen_rtx_CONST (Pmode, x);

      x = gen_rtx_PLUS (Pmode, base_reg, x);
    }

  return x;
}
/* Helper for m68k_unwrap_symbol.
   Also, if unwrapping was successful (that is if (ORIG != <return value>)),
   sets *RELOC_PTR to relocation type for the symbol.

   Returns ORIG unchanged when it is not a recognized
   (const (unspec ...)) wrapper.  */

static rtx
m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
		      enum m68k_reloc *reloc_ptr)
{
  if (GET_CODE (orig) == CONST)
    {
      rtx x;
      enum m68k_reloc dummy;

      x = XEXP (orig, 0);

      /* Allow calls that don't care about the relocation type.  */
      if (reloc_ptr == NULL)
	reloc_ptr = &dummy;

      /* Handle an addend.  */
      if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
	  && CONST_INT_P (XEXP (x, 1)))
	x = XEXP (x, 0);

      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_RELOC16:
	      orig = XVECEXP (x, 0, 0);
	      *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
	      break;

	    case UNSPEC_RELOC32:
	      if (unwrap_reloc32_p)
		{
		  orig = XVECEXP (x, 0, 0);
		  *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
		}
	      break;

	    default:
	      break;
	    }
	}
    }

  return orig;
}
/* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
   UNSPEC_RELOC32 wrappers.  Returns ORIG itself if it is not wrapped.  */

rtx
m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
{
  return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
}
/* Adjust decorated address operand before outputing assembler for it.

   Rewrites OP in place; see the comment below for the exact
   transformation.  */

static void
m68k_adjust_decorated_operand (rtx op)
{
  /* Combine and, possibly, other optimizations may do good job
     converting
       (const (unspec [(symbol)]))
     into
       (const (plus (unspec [(symbol)])
		    (const_int N))).
     The problem with this is emitting @TLS or @GOT decorations.
     The decoration is emitted when processing (unspec), so the
     result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".

     It seems that the easiest solution to this is to convert such
     operands to
       (const (unspec [(plus (symbol)
			     (const_int N))])).
     Note, that the top level of operand remains intact, so we don't have
     to patch up anything outside of the operand.  */

  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, op, ALL)
    {
      rtx x = *iter;
      if (m68k_unwrap_symbol (x, true) != x)
	{
	  rtx plus;

	  gcc_assert (GET_CODE (x) == CONST);
	  plus = XEXP (x, 0);

	  if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
	    {
	      rtx unspec;
	      rtx addend;

	      unspec = XEXP (plus, 0);
	      gcc_assert (GET_CODE (unspec) == UNSPEC);
	      addend = XEXP (plus, 1);
	      gcc_assert (CONST_INT_P (addend));

	      /* We now have all the pieces, rearrange them.  */

	      /* Move symbol to plus.  */
	      XEXP (plus, 0) = XVECEXP (unspec, 0, 0);

	      /* Move plus inside unspec.  */
	      XVECEXP (unspec, 0, 0) = plus;

	      /* Move unspec to top level of const.  */
	      XEXP (x, 0) = unspec;
	    }
	  /* The subtree has been rewritten; don't walk into it again.  */
	  iter.skip_subrtxes ();
	}
    }
}
2580 /* Prescan insn before outputing assembler for it. */
2582 void
2583 m68k_final_prescan_insn (rtx_insn *insn ATTRIBUTE_UNUSED,
2584 rtx *operands, int n_operands)
2586 int i;
2588 for (i = 0; i < n_operands; ++i)
2589 m68k_adjust_decorated_operand (operands[i]);
/* Move X to a register and add REG_EQUAL note pointing to ORIG.
   If REG is non-null, use it; generate new pseudo otherwise.
   Returns the register holding X.  */

static rtx
m68k_move_to_reg (rtx x, rtx orig, rtx reg)
{
  rtx_insn *insn;

  if (reg == NULL_RTX)
    {
      gcc_assert (can_create_pseudo_p ());
      reg = gen_reg_rtx (Pmode);
    }

  insn = emit_move_insn (reg, x);
  /* Put a REG_EQUAL note on this insn, so that it can be optimized
     by loop.  */
  set_unique_reg_note (insn, REG_EQUAL, orig);

  return reg;
}
/* Does the same as m68k_wrap_symbol, but returns a memory reference to
   GOT slot.  The MEM is marked read-only since GOT entries are
   link-time constants.  */

static rtx
m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
{
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);

  x = gen_rtx_MEM (Pmode, x);
  MEM_READONLY_P (x) = 1;

  return x;
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.

   An address is legitimized by making an indirect reference
   through the Global Offset Table with the name of the symbol
   used as an offset.

   The assembler and linker are responsible for placing the
   address of the symbol in the GOT.  The function prologue
   is responsible for initializing a5 to the starting address
   of the GOT.

   The assembler is also responsible for translating a symbol name
   into a constant displacement from the start of the GOT.

   A quick example may make things a little clearer:

   When not generating PIC code to store the value 12345 into _foo
   we would generate the following code:

	movel #12345, _foo

   When generating PIC two transformations are made.  First, the compiler
   loads the address of foo into a register.  So the first transformation makes:

	lea	_foo, a0
	movel   #12345, a0@

   The code in movsi will intercept the lea instruction and call this
   routine which will transform the instructions into:

	movel   a5@(_foo:w), a0
	movel   #12345, a0@

   That (in a nutshell) is how *all* symbol and label references are
   handled.  */

rtx
legitimize_pic_address (rtx orig, machine_mode mode ATTRIBUTE_UNUSED,
			rtx reg)
{
  rtx pic_ref = orig;

  /* First handle a simple SYMBOL_REF or LABEL_REF */
  if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
    {
      gcc_assert (reg);

      pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
      pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      /* Make sure this has not already been legitimized.  */
      if (m68k_unwrap_symbol (orig, true) != orig)
	return orig;

      gcc_assert (reg);

      /* legitimize both operands of the PLUS */
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	pic_ref = plus_constant (Pmode, base, INTVAL (orig));
      else
	pic_ref = gen_rtx_PLUS (Pmode, base, orig);
    }

  return pic_ref;
}
/* The __tls_get_addr symbol, created lazily below.  */
static GTY(()) rtx m68k_tls_get_addr;

/* Return SYMBOL_REF for __tls_get_addr.  */

static rtx
m68k_get_tls_get_addr (void)
{
  if (m68k_tls_get_addr == NULL_RTX)
    m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");

  return m68k_tls_get_addr;
}
/* Return libcall result in A0 instead of usual D0.  */
static bool m68k_libcall_value_in_a0_p = false;

/* Emit instruction sequence that calls __tls_get_addr.  X is
   the TLS symbol we are referencing and RELOC is the symbol type to use
   (either TLSGD or TLSLDM).  EQV is the REG_EQUAL note for the sequence
   emitted.  A pseudo register with result of __tls_get_addr call is
   returned.  */

static rtx
m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
{
  rtx a0;
  rtx_insn *insns;
  rtx dest;

  /* Emit the call sequence.  */
  start_sequence ();

  /* FIXME: Unfortunately, emit_library_call_value does not
     consider (plus (%a5) (const (unspec))) to be a good enough
     operand for push, so it forces it into a register.  The bad
     thing about this is that combiner, due to copy propagation and other
     optimizations, sometimes cannot later fix this.  As a consequence,
     additional register may be allocated resulting in a spill.
     For reference, see args processing loops in
     calls.cc:emit_library_call_value_1.
     For testcase, see gcc.target/m68k/tls-{gd, ld}.c  */
  x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);

  /* __tls_get_addr() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __tls_get_addr() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
				Pmode, x, Pmode);
  m68k_libcall_value_in_a0_p = false;

  insns = get_insns ();
  end_sequence ();

  /* Wrap the whole sequence in a libcall block so it can be CSEd
     against EQV.  */
  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
/* The __m68k_read_tp symbol, created lazily below.  (The original
   comment said __tls_get_addr, a copy-paste slip.)  */
static GTY(()) rtx m68k_read_tp;

/* Return SYMBOL_REF for __m68k_read_tp.  */

static rtx
m68k_get_m68k_read_tp (void)
{
  if (m68k_read_tp == NULL_RTX)
    m68k_read_tp = init_one_libfunc ("__m68k_read_tp");

  return m68k_read_tp;
}
/* Emit instruction sequence that calls __m68k_read_tp.
   A pseudo register with result of __m68k_read_tp call is returned.  */

static rtx
m68k_call_m68k_read_tp (void)
{
  rtx a0;
  rtx eqv;
  rtx_insn *insns;
  rtx dest;

  start_sequence ();

  /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
     is the simpliest way of generating a call.  The difference between
     __m68k_read_tp() and libcall is that the result is returned in D0
     instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
     which temporarily switches returning the result to A0.  */

  /* Emit the call sequence.  */
  m68k_libcall_value_in_a0_p = true;
  a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
				Pmode);
  m68k_libcall_value_in_a0_p = false;
  insns = get_insns ();
  end_sequence ();

  /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
     share the m68k_read_tp result with other IE/LE model accesses.  */
  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);

  gcc_assert (can_create_pseudo_p ());
  dest = gen_reg_rtx (Pmode);
  emit_libcall_block (insns, dest, a0, eqv);

  return dest;
}
/* Return a legitimized address for accessing TLS SYMBOL_REF X.
   For explanations on instructions sequences see TLS/NPTL ABI for m68k and
   ColdFire.  */

rtx
m68k_legitimize_tls_address (rtx orig)
{
  switch (SYMBOL_REF_TLS_MODEL (orig))
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      {
	rtx eqv;
	rtx a0;
	rtx x;

	/* Attach a unique REG_EQUIV, to allow the RTL optimizers to
	   share the LDM result with other LD model accesses.  */
	eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
			      UNSPEC_RELOC32);

	a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);

	x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_INITIAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	/* Load the symbol's offset from its GOT slot and add the
	   thread pointer.  */
	x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
	x = gen_rtx_PLUS (Pmode, x, a0);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    case TLS_MODEL_LOCAL_EXEC:
      {
	rtx a0;
	rtx x;

	a0 = m68k_call_m68k_read_tp ();

	x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);

	if (can_create_pseudo_p ())
	  x = m68k_move_to_reg (x, orig, NULL_RTX);

	orig = x;
	break;
      }

    default:
      gcc_unreachable ();
    }

  return orig;
}
2900 /* Return true if X is a TLS symbol. */
2902 static bool
2903 m68k_tls_symbol_p (rtx x)
2905 if (!TARGET_HAVE_TLS)
2906 return false;
2908 if (GET_CODE (x) != SYMBOL_REF)
2909 return false;
2911 return SYMBOL_REF_TLS_MODEL (x) != 0;
/* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
   though illegitimate one.
   If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */

bool
m68k_tls_reference_p (rtx x, bool legitimate_p)
{
  if (!TARGET_HAVE_TLS)
    return false;

  if (!legitimate_p)
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, x, ALL)
	{
	  rtx x = *iter;

	  /* Note: this is not the same as m68k_tls_symbol_p.  */
	  if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
	    return true;

	  /* Don't recurse into legitimate TLS references.  */
	  if (m68k_tls_reference_p (x, true))
	    iter.skip_subrtxes ();
	}
      return false;
    }
  else
    {
      enum m68k_reloc reloc = RELOC_GOT;

      /* Legitimate iff X unwraps from a relocation wrapper whose
	 relocation kind is one of the TLS kinds.  */
      return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
	      && TLS_RELOC_P (reloc));
    }
}
/* True if I fits in a signed 8-bit moveq immediate.  */
#define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)

/* Return the type of move that should be used for integer I.  */

M68K_CONST_METHOD
m68k_const_method (HOST_WIDE_INT i)
{
  unsigned u;

  if (USE_MOVQ (i))
    return MOVQ;

  /* The ColdFire doesn't have byte or word operations.  */
  /* FIXME: This may not be useful for the m68060 either.  */
  if (!TARGET_COLDFIRE)
    {
      /* if -256 < N < 256 but N is not in range for a moveq
	 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
      if (USE_MOVQ (i ^ 0xff))
	return NOTB;
      /* Likewise, try with not.w */
      if (USE_MOVQ (i ^ 0xffff))
	return NOTW;
      /* This is the only value where neg.w is useful */
      if (i == -65408)
	return NEGW;
    }

  /* Try also with swap.  */
  u = i;
  if (USE_MOVQ ((u >> 16) | (u << 16)))
    return SWAP;

  if (TARGET_ISAB)
    {
      /* Try using MVZ/MVS with an immediate value to load constants.  */
      if (i >= 0 && i <= 65535)
	return MVZ;
      if (i >= -32768 && i <= 32767)
	return MVS;
    }

  /* Otherwise, use move.l */
  return MOVL;
}
/* Return the cost of moving constant I into a data register.
   The cost corresponds to the number of instructions the chosen
   m68k_const_method needs beyond a plain moveq.  */

static int
const_int_cost (HOST_WIDE_INT i)
{
  switch (m68k_const_method (i))
    {
    case MOVQ:
      /* Constants between -128 and 127 are cheap due to moveq.  */
      return 0;
    case MVZ:
    case MVS:
    case NOTB:
    case NOTW:
    case NEGW:
    case SWAP:
      /* Constants easily generated by moveq + not.b/not.w/neg.w/swap.  */
      return 1;
    case MOVL:
      return 2;
    default:
      gcc_unreachable ();
    }
}
/* Implement TARGET_RTX_COSTS.  Set *TOTAL to the cost of X and return
   true when the cost is final (no recursion into sub-expressions).  */

static bool
m68k_rtx_costs (rtx x, machine_mode mode, int outer_code,
		int opno ATTRIBUTE_UNUSED,
		int *total, bool speed ATTRIBUTE_UNUSED)
{
  int code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
      /* Constant zero is super cheap due to clr instruction.  */
      if (x == const0_rtx)
	*total = 0;
      else
	*total = const_int_cost (INTVAL (x));
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 3;
      return true;

    case CONST_DOUBLE:
      /* Make 0.0 cheaper than other floating constants to
	 encourage creating tstsf and tstdf insns.  */
      if ((GET_RTX_CLASS (outer_code) == RTX_COMPARE
	   || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
	  && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
	*total = 4;
      else
	*total = 5;
      return true;

    /* These are vaguely right for a 68020.  */
    /* The costs for long multiply have been adjusted to work properly
       in synth_mult on the 68020, relative to an average of the time
       for add and the time for shift, taking away a little more because
       sometimes move insns are needed.  */
    /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
       terms.  */
#define MULL_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 4		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 3 : 13)

#define MULW_COST				\
  (TUNE_68060 ? 2				\
   : TUNE_68040 ? 3				\
   : TUNE_68000_10 ? 5				\
   : (TUNE_CFV2 && TUNE_EMAC) ? 3		\
   : (TUNE_CFV2 && TUNE_MAC) ? 2		\
   : TUNE_CFV2 ? 8				\
   : TARGET_COLDFIRE ? 2 : 8)

#define DIVW_COST				\
  (TARGET_CF_HWDIV ? 11				\
   : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)

    case PLUS:
      /* An lea costs about three times as much as a simple add.  */
      if (mode == SImode
	  && GET_CODE (XEXP (x, 1)) == REG
	  && ((GET_CODE (XEXP (x, 0)) == MULT
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	       && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
		   || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
		   || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		  && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (XEXP (x, 0), 1))
		      <= 3))))
	{
	  /* lea an@(dx:l:i),am */
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
	  return true;
	}
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      if (TUNE_68060)
	{
	  *total = COSTS_N_INSNS(1);
	  return true;
	}
      if (TUNE_68000_10)
	{
	  if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	    {
	      if (INTVAL (XEXP (x, 1)) < 16)
		*total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
	      else
		/* We're using clrw + swap for these cases.  */
		*total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
	    }
	  else
	    *total = COSTS_N_INSNS (10); /* Worst case.  */
	  return true;
	}
      /* A shift by a big integer takes an extra instruction.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) == 16))
	{
	  *total = COSTS_N_INSNS (2);	 /* clrw;swap */
	  return true;
	}
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && !(INTVAL (XEXP (x, 1)) > 0
	       && INTVAL (XEXP (x, 1)) <= 8))
	{
	  *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	 /* lsr #i,dn */
	  return true;
	}
      return false;

    case MULT:
      if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
	   || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
	  && mode == SImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (MULW_COST);
      else
	*total = COSTS_N_INSNS (MULL_COST);
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      if (mode == QImode || mode == HImode)
	*total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
      else if (TARGET_CF_HWDIV)
	*total = COSTS_N_INSNS (18);
      else
	*total = COSTS_N_INSNS (43);		/* div.l */
      return true;

    case ZERO_EXTRACT:
      /* Bit-field tests used directly in a comparison are free (btst).  */
      if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
	  || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
	*total = 0;
      return false;

    default:
      return false;
    }
}
/* Return an instruction to move CONST_INT OPERANDS[1] into data register
   OPERANDS[0].  May rewrite OPERANDS[1] to the intermediate immediate
   actually emitted (NOTB/NOTW/SWAP cases).  */

static const char *
output_move_const_into_data_reg (rtx *operands)
{
  HOST_WIDE_INT i;

  i = INTVAL (operands[1]);
  switch (m68k_const_method (i))
    {
    case MVZ:
      return "mvzw %1,%0";
    case MVS:
      return "mvsw %1,%0";
    case MOVQ:
      return "moveq %1,%0";
    case NOTB:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xff);
      return "moveq %1,%0\n\tnot%.b %0";
    case NOTW:
      CC_STATUS_INIT;
      operands[1] = GEN_INT (i ^ 0xffff);
      return "moveq %1,%0\n\tnot%.w %0";
    case NEGW:
      CC_STATUS_INIT;
      return "moveq #-128,%0\n\tneg%.w %0";
    case SWAP:
      {
	unsigned u = i;

	operands[1] = GEN_INT ((u << 16) | (u >> 16));
	return "moveq %1,%0\n\tswap %0";
      }
    case MOVL:
      return "move%.l %1,%0";
    default:
      gcc_unreachable ();
    }
}
3221 /* Return true if I can be handled by ISA B's mov3q instruction. */
3223 bool
3224 valid_mov3q_const (HOST_WIDE_INT i)
3226 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
/* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
   I is the value of OPERANDS[1].  Picks the cheapest encoding
   (clr/mov3q/sub/moveq/move.w/pea/move.l) for the destination kind.  */

static const char *
output_move_simode_const (rtx *operands)
{
  rtx dest;
  HOST_WIDE_INT src;

  dest = operands[0];
  src = INTVAL (operands[1]);
  if (src == 0
      && (DATA_REG_P (dest) || MEM_P (dest))
      /* clr insns on 68000 read before writing.  */
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
    return "clr%.l %0";
  else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
    return "mov3q%.l %1,%0";
  else if (src == 0 && ADDRESS_REG_P (dest))
    return "sub%.l %0,%0";
  else if (DATA_REG_P (dest))
    return output_move_const_into_data_reg (operands);
  else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%0";
      /* Word moves into address registers sign-extend, so this is safe
	 for 16-bit values.  */
      return "move%.w %1,%0";
    }
  else if (MEM_P (dest)
	   && GET_CODE (XEXP (dest, 0)) == PRE_DEC
	   && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
	   && IN_RANGE (src, -0x8000, 0x7fff))
    {
      if (valid_mov3q_const (src))
	return "mov3q%.l %1,%-";
      return "pea %a1";
    }
  return "move%.l %1,%0";
}
/* Return the assembler template for an SImode move described by
   OPERANDS.  Constants, symbolic pushes, and loads of symbols into
   address registers get specialized forms.  */

const char *
output_move_simode (rtx *operands)
{
  handle_flags_for_move (operands);

  if (GET_CODE (operands[1]) == CONST_INT)
    return output_move_simode_const (operands);
  else if ((GET_CODE (operands[1]) == SYMBOL_REF
	    || GET_CODE (operands[1]) == CONST)
	   && push_operand (operands[0], SImode))
    return "pea %a1";
  else if ((GET_CODE (operands[1]) == SYMBOL_REF
	    || GET_CODE (operands[1]) == CONST)
	   && ADDRESS_REG_P (operands[0]))
    return "lea %a1,%0";
  return "move%.l %1,%0";
}
/* Return the assembler template for a HImode move described by
   OPERANDS.  Chooses clr/sub/moveq/move.w for constant sources.  */

const char *
output_move_himode (rtx *operands)
{
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      if (operands[1] == const0_rtx
	  && (DATA_REG_P (operands[0])
	      || GET_CODE (operands[0]) == MEM)
	  /* clr insns on 68000 read before writing.  */
	  && ((TARGET_68010 || TARGET_COLDFIRE)
	      || !(GET_CODE (operands[0]) == MEM
		   && MEM_VOLATILE_P (operands[0]))))
	return "clr%.w %0";
      else if (operands[1] == const0_rtx
	       && ADDRESS_REG_P (operands[0]))
	return "sub%.l %0,%0";
      else if (DATA_REG_P (operands[0])
	       && INTVAL (operands[1]) < 128
	       && INTVAL (operands[1]) >= -128)
	return "moveq %1,%0";
      else if (INTVAL (operands[1]) < 0x8000
	       && INTVAL (operands[1]) >= -0x8000)
	return "move%.w %1,%0";
    }
  else if (CONSTANT_P (operands[1]))
    /* Non-CONST_INT constants should have been reloaded by now.  */
    gcc_unreachable ();
  return "move%.w %1,%0";
}
/* Return the assembler template for a byte (QImode) move of
   operands[1] into operands[0].  Handles clr/st/moveq shortcuts and
   the lack of byte moves to/from address registers.  */
const char *
output_move_qimode (rtx *operands)
{
  /* Record the effect of this move on the CC flags tracking state.  */
  handle_flags_for_move (operands);

  /* 68k family always modifies the stack pointer by at least 2, even for
     byte pushes.  The 5200 (ColdFire) does not do this.  */

  /* This case is generated by pushqi1 pattern now.  */
  gcc_assert (!(GET_CODE (operands[0]) == MEM
		&& GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
		&& XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
		&& ! ADDRESS_REG_P (operands[1])
		&& ! TARGET_COLDFIRE));

  /* clr and st insns on 68000 read before writing.  */
  if (!ADDRESS_REG_P (operands[0])
      && ((TARGET_68010 || TARGET_COLDFIRE)
	  || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
    {
      if (operands[1] == const0_rtx)
	return "clr%.b %0";
      /* st sets all bits of the byte; usable when the low 8 bits of the
	 source constant are all ones.  st does not set the flags the
	 way a move would, hence CC_STATUS_INIT.  */
      if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
	  && GET_CODE (operands[1]) == CONST_INT
	  && (INTVAL (operands[1]) & 255) == 255)
	{
	  CC_STATUS_INIT;
	  return "st %0";
	}
    }
  if (GET_CODE (operands[1]) == CONST_INT
      && DATA_REG_P (operands[0])
      && INTVAL (operands[1]) < 128
      && INTVAL (operands[1]) >= -128)
    return "moveq %1,%0";
  if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
    return "sub%.l %0,%0";
  if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
    gcc_unreachable ();
  /* 68k family (including the 5200 ColdFire) does not support byte moves to
     from address registers.  */
  if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
    {
      if (ADDRESS_REG_P (operands[1]))
	CC_STATUS_INIT;
      return "move%.w %1,%0";
    }
  return "move%.b %1,%0";
}
3367 const char *
3368 output_move_stricthi (rtx *operands)
3370 if (operands[1] == const0_rtx
3371 /* clr insns on 68000 read before writing. */
3372 && ((TARGET_68010 || TARGET_COLDFIRE)
3373 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3374 return "clr%.w %0";
3375 return "move%.w %1,%0";
3378 const char *
3379 output_move_strictqi (rtx *operands)
3381 if (operands[1] == const0_rtx
3382 /* clr insns on 68000 read before writing. */
3383 && ((TARGET_68010 || TARGET_COLDFIRE)
3384 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3385 return "clr%.b %0";
3386 return "move%.b %1,%0";
3389 /* Return the best assembler insn template
3390 for moving operands[1] into operands[0] as a fullword. */
3392 static const char *
3393 singlemove_string (rtx *operands)
3395 if (GET_CODE (operands[1]) == CONST_INT)
3396 return output_move_simode_const (operands);
3397 return "move%.l %1,%0";
/* Output assembler or rtl code to perform a doubleword move insn
   with operands OPERANDS.
   Pointers to 3 helper functions should be specified:
   HANDLE_REG_ADJUST to adjust a register by a small value,
   HANDLE_COMPADR to compute an address and
   HANDLE_MOVSI to move 4 bytes.

   The move is performed 4 bytes at a time (two words for DImode/DFmode,
   three for XFmode, where SIZE == 12).  The bulk of the function works
   out a safe ordering of the word moves when source and destination
   overlap or use autoincrement/autodecrement addressing.  */

static void
handle_move_double (rtx operands[2],
		    void (*handle_reg_adjust) (rtx, int),
		    void (*handle_compadr) (rtx [2]),
		    void (*handle_movsi) (rtx [2]))
{
  enum
    {
      REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
    } optype0, optype1;
  rtx latehalf[2];
  rtx middlehalf[2];
  rtx xops[2];
  rtx addreg0 = 0, addreg1 = 0;
  int dest_overlapped_low = 0;
  int size = GET_MODE_SIZE (GET_MODE (operands[0]));

  middlehalf[0] = 0;
  middlehalf[1] = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
    optype0 = REGOP;
  else if (offsettable_memref_p (operands[0]))
    optype0 = OFFSOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
    optype0 = POPOP;
  else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
    optype0 = PUSHOP;
  else if (GET_CODE (operands[0]) == MEM)
    optype0 = MEMOP;
  else
    optype0 = RNDOP;

  if (REG_P (operands[1]))
    optype1 = REGOP;
  else if (CONSTANT_P (operands[1]))
    optype1 = CNSTOP;
  else if (offsettable_memref_p (operands[1]))
    optype1 = OFFSOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
    optype1 = POPOP;
  else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
    optype1 = PUSHOP;
  else if (GET_CODE (operands[1]) == MEM)
    optype1 = MEMOP;
  else
    optype1 = RNDOP;

  /* Check for the cases that the operand constraints are not supposed
     to allow to happen.  Generating code for these cases is
     painful.  */
  gcc_assert (optype0 != RNDOP && optype1 != RNDOP);

  /* If one operand is decrementing and one is incrementing
     decrement the former register explicitly
     and change that operand into ordinary indexing.  */

  if (optype0 == PUSHOP && optype1 == POPOP)
    {
      operands[0] = XEXP (XEXP (operands[0], 0), 0);

      handle_reg_adjust (operands[0], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[0] = gen_rtx_MEM (XFmode, operands[0]);
      else if (GET_MODE (operands[0]) == DFmode)
	operands[0] = gen_rtx_MEM (DFmode, operands[0]);
      else
	operands[0] = gen_rtx_MEM (DImode, operands[0]);
      optype0 = OFFSOP;
    }
  if (optype0 == POPOP && optype1 == PUSHOP)
    {
      operands[1] = XEXP (XEXP (operands[1], 0), 0);

      handle_reg_adjust (operands[1], -size);

      if (GET_MODE (operands[1]) == XFmode)
	operands[1] = gen_rtx_MEM (XFmode, operands[1]);
      else if (GET_MODE (operands[1]) == DFmode)
	operands[1] = gen_rtx_MEM (DFmode, operands[1]);
      else
	operands[1] = gen_rtx_MEM (DImode, operands[1]);
      optype1 = OFFSOP;
    }

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first,
     but if either operand is autodecrementing then we
     do the high-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (size == 12)
    {
      if (optype0 == REGOP)
	{
	  latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
	  middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
	}
      else if (optype0 == OFFSOP)
	{
	  middlehalf[0] = adjust_address (operands[0], SImode, 4);
	  latehalf[0] = adjust_address (operands[0], SImode, size - 4);
	}
      else
	{
	  /* Unoffsettable MEM: all three word refs share the address;
	     the address register is bumped between moves instead.  */
	  middlehalf[0] = adjust_address (operands[0], SImode, 0);
	  latehalf[0] = adjust_address (operands[0], SImode, 0);
	}

      if (optype1 == REGOP)
	{
	  latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
	  middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
	}
      else if (optype1 == OFFSOP)
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 4);
	  latehalf[1] = adjust_address (operands[1], SImode, size - 4);
	}
      else if (optype1 == CNSTOP)
	{
	  if (GET_CODE (operands[1]) == CONST_DOUBLE)
	    {
	      long l[3];

	      REAL_VALUE_TO_TARGET_LONG_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
	      operands[1] = GEN_INT (l[0]);
	      middlehalf[1] = GEN_INT (l[1]);
	      latehalf[1] = GEN_INT (l[2]);
	    }
	  else
	    {
	      /* No non-CONST_DOUBLE constant should ever appear
		 here.  */
	      gcc_assert (!CONSTANT_P (operands[1]));
	    }
	}
      else
	{
	  middlehalf[1] = adjust_address (operands[1], SImode, 0);
	  latehalf[1] = adjust_address (operands[1], SImode, 0);
	}
    }
  else
    /* size is not 12: */
    {
      if (optype0 == REGOP)
	latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      else if (optype0 == OFFSOP)
	latehalf[0] = adjust_address (operands[0], SImode, size - 4);
      else
	latehalf[0] = adjust_address (operands[0], SImode, 0);

      if (optype1 == REGOP)
	latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
      else if (optype1 == OFFSOP)
	latehalf[1] = adjust_address (operands[1], SImode, size - 4);
      else if (optype1 == CNSTOP)
	split_double (operands[1], &operands[1], &latehalf[1]);
      else
	latehalf[1] = adjust_address (operands[1], SImode, 0);
    }

  /* If insn is effectively movd N(REG),-(REG) then we will do the high
     word first.  We should use the adjusted operand 1 (which is N+4(REG))
     for the low word as well, to compensate for the first decrement of
     REG.  */
  if (optype0 == PUSHOP
      && reg_overlap_mentioned_p (XEXP (XEXP (operands[0], 0), 0), operands[1]))
    operands[1] = middlehalf[1] = latehalf[1];

  /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
     if the upper part of reg N does not appear in the MEM, arrange to
     emit the move late-half first.  Otherwise, compute the MEM address
     into the upper part of N and use that as a pointer to the memory
     operand.  */
  if (optype0 == REGOP
      && (optype1 == OFFSOP || optype1 == MEMOP))
    {
      rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));

      if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	  && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	{
	  /* If both halves of dest are used in the src memory address,
	     compute the address into latehalf of dest.
	     Note that this can't happen if the dest is two data regs.  */
	compadr:
	  xops[0] = latehalf[0];
	  xops[1] = XEXP (operands[1], 0);

	  handle_compadr (xops);
	  if (GET_MODE (operands[1]) == XFmode)
	    {
	      operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
	      middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	  else
	    {
	      operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
	      latehalf[1] = adjust_address (operands[1], DImode, size - 4);
	    }
	}
      else if (size == 12
	       && reg_overlap_mentioned_p (middlehalf[0],
					   XEXP (operands[1], 0)))
	{
	  /* Check for two regs used by both source and dest.
	     Note that this can't happen if the dest is all data regs.
	     It can happen if the dest is d6, d7, a0.
	     But in that case, latehalf is an addr reg, so
	     the code at compadr does ok.  */

	  if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
	      || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
	    goto compadr;

	  /* JRV says this can't happen: */
	  gcc_assert (!addreg0 && !addreg1);

	  /* Only the middle reg conflicts; simply put it last.  */
	  handle_movsi (operands);
	  handle_movsi (latehalf);
	  handle_movsi (middlehalf);

	  return;
	}
      else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
	/* If the low half of dest is mentioned in the source memory
	   address, the arrange to emit the move late half first.  */
	dest_overlapped_low = 1;
    }

  /* If one or both operands autodecrementing,
     do the two words, high-numbered first.  */

  /* Likewise, if the first move would clobber the source of the second one,
     do them in the other order.  This happens only for registers;
     such overlap can't happen in memory unless the user explicitly
     sets it up, and that is an undefined circumstance.  */

  if (optype0 == PUSHOP || optype1 == PUSHOP
      || (optype0 == REGOP && optype1 == REGOP
	  && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
	      || REGNO (operands[0]) == REGNO (latehalf[1])))
      || dest_overlapped_low)
    {
      /* Make any unoffsettable addresses point at high-numbered word.  */
      if (addreg0)
	handle_reg_adjust (addreg0, size - 4);
      if (addreg1)
	handle_reg_adjust (addreg1, size - 4);

      /* Do that word.  */
      handle_movsi (latehalf);

      /* Undo the adds we just did.  */
      if (addreg0)
	handle_reg_adjust (addreg0, -4);
      if (addreg1)
	handle_reg_adjust (addreg1, -4);

      if (size == 12)
	{
	  handle_movsi (middlehalf);

	  if (addreg0)
	    handle_reg_adjust (addreg0, -4);
	  if (addreg1)
	    handle_reg_adjust (addreg1, -4);
	}

      /* Do low-numbered word.  */

      handle_movsi (operands);
      return;
    }

  /* Normal case: do the two words, low-numbered first.  */

  m68k_final_prescan_insn (NULL, operands, 2);
  handle_movsi (operands);

  /* Do the middle one of the three words for long double */
  if (size == 12)
    {
      if (addreg0)
	handle_reg_adjust (addreg0, 4);
      if (addreg1)
	handle_reg_adjust (addreg1, 4);

      m68k_final_prescan_insn (NULL, middlehalf, 2);
      handle_movsi (middlehalf);
    }

  /* Make any unoffsettable addresses point at high-numbered word.  */
  if (addreg0)
    handle_reg_adjust (addreg0, 4);
  if (addreg1)
    handle_reg_adjust (addreg1, 4);

  /* Do that word.  */
  m68k_final_prescan_insn (NULL, latehalf, 2);
  handle_movsi (latehalf);

  /* Undo the adds we just did.  */
  if (addreg0)
    handle_reg_adjust (addreg0, -(size - 4));
  if (addreg1)
    handle_reg_adjust (addreg1, -(size - 4));

  return;
}
/* Output assembler code to adjust REG by N.  N must be one of
   +-4, +-8 or +-12 (the word-step amounts used by handle_move_double);
   addq/subq only take immediates 1..8, so +-12 needs the long form.  */
static void
output_reg_adjust (rtx reg, int n)
{
  const char *s;

  gcc_assert (GET_MODE (reg) == SImode && n >= -12 && n != 0 && n <= 12);

  switch (n)
    {
    case 12:
      s = "add%.l #12,%0";
      break;

    case 8:
      s = "addq%.l #8,%0";
      break;

    case 4:
      s = "addq%.l #4,%0";
      break;

    case -12:
      s = "sub%.l #12,%0";
      break;

    case -8:
      s = "subq%.l #8,%0";
      break;

    case -4:
      s = "subq%.l #4,%0";
      break;

    default:
      gcc_unreachable ();
      /* Dead assignment; keeps S provably initialized for the
	 compiler when gcc_unreachable compiles to nothing.  */
      s = NULL;
    }

  output_asm_insn (s, &reg);
}
/* Emit rtl code to adjust REG by N.  RTL counterpart of
   output_reg_adjust; same restrictions on N.  */
static void
emit_reg_adjust (rtx reg1, int n)
{
  rtx reg2;

  gcc_assert (GET_MODE (reg1) == SImode && n >= -12 && n != 0 && n <= 12);

  /* Copy so the add/sub pattern gets distinct dest and src rtxes.  */
  reg1 = copy_rtx (reg1);
  reg2 = copy_rtx (reg1);

  if (n < 0)
    emit_insn (gen_subsi3 (reg1, reg2, GEN_INT (-n)));
  else if (n > 0)
    emit_insn (gen_addsi3 (reg1, reg2, GEN_INT (n)));
  else
    gcc_unreachable ();
}
/* Output assembler to load address OPERANDS[1] into register
   OPERANDS[0] (lea).  */
static void
output_compadr (rtx operands[2])
{
  output_asm_insn ("lea %a1,%0", operands);
}
/* Output the best assembler insn for moving operands[1] into operands[0]
   as a fullword.  Assembler-emitting counterpart of emit_movsi.  */
static void
output_movsi (rtx operands[2])
{
  output_asm_insn (singlemove_string (operands), operands);
}
/* Copy OP and change its mode to MODE.  A VOIDmode OP (e.g. a
   constant) is returned unchanged.  */
static rtx
copy_operand (rtx op, machine_mode mode)
{
  /* ??? This looks really ugly.  There must be a better way
     to change a mode on the operand.  */
  if (GET_MODE (op) != VOIDmode)
    {
      if (REG_P (op))
	op = gen_rtx_REG (mode, REGNO (op));
      else
	{
	  /* Copy before PUT_MODE so shared rtl is not modified.  */
	  op = copy_rtx (op);
	  PUT_MODE (op, mode);
	}
    }

  return op;
}
/* Emit rtl code for moving operands[1] into operands[0] as a fullword.  */
static void
emit_movsi (rtx operands[2])
{
  /* Force both operands to SImode so gen_movsi accepts them.  */
  operands[0] = copy_operand (operands[0], SImode);
  operands[1] = copy_operand (operands[1], SImode);

  emit_insn (gen_movsi (operands[0], operands[1]));
}
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  Returns "" because all assembler output
   happens through output_asm_insn inside the helpers.  */
const char *
output_move_double (rtx *operands)
{
  handle_move_double (operands,
		      output_reg_adjust, output_compadr, output_movsi);

  return "";
}
/* Output rtl code to perform a doubleword move insn
   with operands OPERANDS.  RTL-emitting counterpart of
   output_move_double.  */
void
m68k_emit_move_double (rtx operands[2])
{
  handle_move_double (operands, emit_reg_adjust, emit_movsi, emit_movsi);
}
3864 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
3865 new rtx with the correct mode. */
3867 static rtx
3868 force_mode (machine_mode mode, rtx orig)
3870 if (mode == GET_MODE (orig))
3871 return orig;
3873 if (REGNO (orig) >= FIRST_PSEUDO_REGISTER)
3874 abort ();
3876 return gen_rtx_REG (mode, REGNO (orig));
/* Return nonzero if OP is a floating-point register.  The reg_renumber
   guard means this can only be true once reg_renumber has been set up
   (NOTE: presumably after register allocation — confirm against
   callers).  MODE is unused.  */
static int
fp_reg_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
{
  return reg_renumber && FP_REG_P (op);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, machine_mode mode, rtx scratch_reg)
{
  rtx operand0 = operands[0];
  rtx operand1 = operands[1];
  rtx tem;

  /* During reload, replace pseudos that did not get hard registers by
     their memory equivalents so the secondary-reload cases below see
     the real operands.  */
  if (scratch_reg
      && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem (REGNO (operand0));
  else if (scratch_reg
	   && (reload_in_progress || lra_in_progress)
	   && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand0))),
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp, true);
    }

  if (scratch_reg
      && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem (REGNO (operand1));
  else if (scratch_reg
	   && (reload_in_progress || lra_in_progress)
	   && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
      /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem (REGNO (SUBREG_REG (operand1))),
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp, true);
    }

  /* Substitute any pending reload replacements in MEM addresses.  */
  if (scratch_reg && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);
  if (scratch_reg && (reload_in_progress || lra_in_progress)
      && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers where
     the address is symbolic by using the scratch register */
  if (fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && ! memory_address_p (DFmode, XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p (DFmode, XEXP (XEXP (operand1, 0), 0)))))
      && scratch_reg)
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address.  We want
	 it in SImode regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      /* Load via the now-valid (scratch_reg) address.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }
  else if (fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& ! memory_address_p (DFmode, XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p (DFmode, XEXP (XEXP (operand0, 0), 0))))
	   && scratch_reg)
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      /* Store via the now-valid (scratch_reg) address.  */
      emit_insn (gen_rtx_SET (gen_rtx_MEM (mode, scratch_reg), operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and an register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (fp_reg_operand (operand0, mode)
	   && CONSTANT_P (operand1)
	   && scratch_reg)
    {
      rtx xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in SIMODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (SImode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_insn (gen_rtx_SET (scratch_reg, xoperands[1]));

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (operand0, gen_rtx_MEM (mode, scratch_reg)));
      return 1;
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
/* Split one or more DImode RTL references into pairs of SImode
   references.  The RTL can be REG, offsettable MEM, integer constant, or
   CONST_DOUBLE.  "operands" is a pointer to an array of DImode RTL to
   split and "num" is its length.  lo_half and hi_half are output arrays
   that parallel "operands".  The low word is at byte offset 4 and the
   high word at offset 0 (big-endian word order).  */

void
split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
{
  while (num--)
    {
      rtx op = operands[num];

      /* simplify_subreg refuses to split volatile memory addresses,
	 but we still have to handle it.  */
      if (GET_CODE (op) == MEM)
	{
	  lo_half[num] = adjust_address (op, SImode, 4);
	  hi_half[num] = adjust_address (op, SImode, 0);
	}
      else
	{
	  /* VOIDmode ops (bare constants) are treated as DImode.  */
	  lo_half[num] = simplify_gen_subreg (SImode, op,
					      GET_MODE (op) == VOIDmode
					      ? DImode : GET_MODE (op), 4);
	  hi_half[num] = simplify_gen_subreg (SImode, op,
					      GET_MODE (op) == VOIDmode
					      ? DImode : GET_MODE (op), 0);
	}
    }
}
4084 /* Split X into a base and a constant offset, storing them in *BASE
4085 and *OFFSET respectively. */
4087 static void
4088 m68k_split_offset (rtx x, rtx *base, HOST_WIDE_INT *offset)
4090 *offset = 0;
4091 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
4093 *offset += INTVAL (XEXP (x, 1));
4094 x = XEXP (x, 0);
4096 *base = x;
/* Return true if PATTERN is a PARALLEL suitable for a movem or fmovem
   instruction.  STORE_P says whether the move is a load or store.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_BASE is the base register and AUTOMOD_OFFSET is the total
   adjustment.  This adjustment will be made by the first element of
   PARALLEL, with the loads or stores starting at element 1.  If the
   instruction does not use post-increment or pre-decrement addressing,
   AUTOMOD_BASE is null, AUTOMOD_OFFSET is 0, and the loads or stores
   start at element 0.  */

bool
m68k_movem_pattern_p (rtx pattern, rtx automod_base,
		      HOST_WIDE_INT automod_offset, bool store_p)
{
  rtx base, mem_base, set, mem, reg, last_reg;
  HOST_WIDE_INT offset, mem_offset;
  int i, first, len;
  enum reg_class rclass;

  len = XVECLEN (pattern, 0);
  first = (automod_base != NULL);

  if (automod_base)
    {
      /* Stores must be pre-decrement and loads must be post-increment.  */
      if (store_p != (automod_offset < 0))
	return false;

      /* Work out the base and offset for lowest memory location.  */
      base = automod_base;
      offset = (automod_offset < 0 ? automod_offset : 0);
    }
  else
    {
      /* Allow any valid base and offset in the first access.  */
      base = NULL;
      offset = 0;
    }

  last_reg = NULL;
  rclass = NO_REGS;
  /* Check each SET: consecutive memory locations on the source/dest
     side, strictly ascending hard registers of one class on the other.  */
  for (i = first; i < len; i++)
    {
      /* We need a plain SET.  */
      set = XVECEXP (pattern, 0, i);
      if (GET_CODE (set) != SET)
	return false;

      /* Check that we have a memory location...  */
      mem = XEXP (set, !store_p);
      if (!MEM_P (mem) || !memory_operand (mem, VOIDmode))
	return false;

      /* ...with the right address.  */
      if (base == NULL)
	{
	  m68k_split_offset (XEXP (mem, 0), &base, &offset);
	  /* The ColdFire instruction only allows (An) and (d16,An) modes.
	     There are no mode restrictions for 680x0 besides the
	     automodification rules enforced above.  */
	  if (TARGET_COLDFIRE
	      && !m68k_legitimate_base_reg_p (base, reload_completed))
	    return false;
	}
      else
	{
	  m68k_split_offset (XEXP (mem, 0), &mem_base, &mem_offset);
	  if (!rtx_equal_p (base, mem_base) || offset != mem_offset)
	    return false;
	}

      /* Check that we have a register of the required mode and class.  */
      reg = XEXP (set, store_p);
      if (!REG_P (reg)
	  || !HARD_REGISTER_P (reg)
	  || GET_MODE (reg) != reg_raw_mode[REGNO (reg)])
	return false;

      if (last_reg)
	{
	  /* The register must belong to RCLASS and have a higher number
	     than the register in the previous SET.  */
	  if (!TEST_HARD_REG_BIT (reg_class_contents[rclass], REGNO (reg))
	      || REGNO (last_reg) >= REGNO (reg))
	    return false;
	}
      else
	{
	  /* Work out which register class we need.  */
	  if (INT_REGNO_P (REGNO (reg)))
	    rclass = GENERAL_REGS;
	  else if (FP_REGNO_P (REGNO (reg)))
	    rclass = FP_REGS;
	  else
	    return false;
	}

      last_reg = reg;
      offset += GET_MODE_SIZE (GET_MODE (reg));
    }

  /* If we have an automodification, check whether the final offset is OK.  */
  if (automod_base && offset != (automod_offset < 0 ? 0 : automod_offset))
    return false;

  /* Reject unprofitable cases.  */
  if (len < first + (rclass == FP_REGS ? MIN_FMOVEM_REGS : MIN_MOVEM_REGS))
    return false;

  return true;
}
/* Return the assembly code template for a movem or fmovem instruction
   whose pattern is given by PATTERN.  Store the template's operands
   in OPERANDS.

   If the instruction uses post-increment or pre-decrement addressing,
   AUTOMOD_OFFSET is the total adjustment, otherwise it is 0.  STORE_P
   is true if this is a store instruction.  */

const char *
m68k_output_movem (rtx *operands, rtx pattern,
		   HOST_WIDE_INT automod_offset, bool store_p)
{
  unsigned int mask;
  int i, first;

  gcc_assert (GET_CODE (pattern) == PARALLEL);
  mask = 0;
  first = (automod_offset != 0);
  /* Build the register mask operand from the SETs in the PARALLEL.  */
  for (i = first; i < XVECLEN (pattern, 0); i++)
    {
      /* When using movem with pre-decrement addressing, register X + D0_REG
	 is controlled by bit 15 - X.  For all other addressing modes,
	 register X + D0_REG is controlled by bit X.  Confusingly, the
	 register mask for fmovem is in the opposite order to that for
	 movem.  */
      unsigned int regno;

      gcc_assert (MEM_P (XEXP (XVECEXP (pattern, 0, i), !store_p)));
      gcc_assert (REG_P (XEXP (XVECEXP (pattern, 0, i), store_p)));
      regno = REGNO (XEXP (XVECEXP (pattern, 0, i), store_p));
      if (automod_offset < 0)
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (regno - FP0_REG);
	  else
	    mask |= 1 << (15 - (regno - D0_REG));
	}
      else
	{
	  if (FP_REGNO_P (regno))
	    mask |= 1 << (7 - (regno - FP0_REG));
	  else
	    mask |= 1 << (regno - D0_REG);
	}
    }
  /* movem/fmovem do not leave usable condition codes behind.  */
  CC_STATUS_INIT;

  /* Operand 0 is the (possibly auto-modified) memory address.  */
  if (automod_offset == 0)
    operands[0] = XEXP (XEXP (XVECEXP (pattern, 0, first), !store_p), 0);
  else if (automod_offset < 0)
    operands[0] = gen_rtx_PRE_DEC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  else
    operands[0] = gen_rtx_POST_INC (Pmode, SET_DEST (XVECEXP (pattern, 0, 0)));
  operands[1] = GEN_INT (mask);
  if (FP_REGNO_P (REGNO (XEXP (XVECEXP (pattern, 0, first), store_p))))
    {
      if (store_p)
	return "fmovem %1,%a0";
      else
	return "fmovem %a0,%1";
    }
  else
    {
      if (store_p)
	return "movem%.l %1,%a0";
      else
	return "movem%.l %a0,%1";
    }
}
4282 /* Return a REG that occurs in ADDR with coefficient 1.
4283 ADDR can be effectively incremented by incrementing REG. */
4285 static rtx
4286 find_addr_reg (rtx addr)
4288 while (GET_CODE (addr) == PLUS)
4290 if (GET_CODE (XEXP (addr, 0)) == REG)
4291 addr = XEXP (addr, 0);
4292 else if (GET_CODE (XEXP (addr, 1)) == REG)
4293 addr = XEXP (addr, 1);
4294 else if (CONSTANT_P (XEXP (addr, 0)))
4295 addr = XEXP (addr, 1);
4296 else if (CONSTANT_P (XEXP (addr, 1)))
4297 addr = XEXP (addr, 0);
4298 else
4299 gcc_unreachable ();
4301 gcc_assert (GET_CODE (addr) == REG);
4302 return addr;
/* Output assembler code to perform a 32-bit 3-operand add.
   Uses lea for true 3-operand forms, addq/subq for small immediates,
   and add.l/add.w otherwise.  */

const char *
output_addsi3 (rtx *operands)
{
  if (! operands_match_p (operands[0], operands[1]))
    {
      /* lea needs an address register as the base; swap so the address
	 register (if any) is operand 1.  */
      if (!ADDRESS_REG_P (operands[1]))
	{
	  rtx tmp = operands[1];

	  operands[1] = operands[2];
	  operands[2] = tmp;
	}

      /* These insns can result from reloads to access
	 stack slots over 64k from the frame pointer.  */
      if (GET_CODE (operands[2]) == CONST_INT
	  && (INTVAL (operands[2]) < -32768 || INTVAL (operands[2]) > 32767))
	return "move%.l %2,%0\n\tadd%.l %1,%0";
      if (GET_CODE (operands[2]) == REG)
	return MOTOROLA ? "lea (%1,%2.l),%0" : "lea %1@(0,%2:l),%0";
      return MOTOROLA ? "lea (%c2,%1),%0" : "lea %1@(%c2),%0";
    }
  if (GET_CODE (operands[2]) == CONST_INT)
    {
      /* addq/subq take immediates 1..8 only.  */
      if (INTVAL (operands[2]) > 0
	  && INTVAL (operands[2]) <= 8)
	return "addq%.l %2,%0";
      if (INTVAL (operands[2]) < 0
	  && INTVAL (operands[2]) >= -8)
	{
	  operands[2] = GEN_INT (- INTVAL (operands[2]));
	  return "subq%.l %2,%0";
	}
      /* On the CPU32 it is faster to use two addql instructions to
	 add a small integer (8 < N <= 16) to a register.
	 Likewise for subql.  */
      if (TUNE_CPU32 && REG_P (operands[0]))
	{
	  if (INTVAL (operands[2]) > 8
	      && INTVAL (operands[2]) <= 16)
	    {
	      operands[2] = GEN_INT (INTVAL (operands[2]) - 8);
	      return "addq%.l #8,%0\n\taddq%.l %2,%0";
	    }
	  if (INTVAL (operands[2]) < -8
	      && INTVAL (operands[2]) >= -16)
	    {
	      operands[2] = GEN_INT (- INTVAL (operands[2]) - 8);
	      return "subq%.l #8,%0\n\tsubq%.l %2,%0";
	    }
	}
      if (ADDRESS_REG_P (operands[0])
	  && INTVAL (operands[2]) >= -0x8000
	  && INTVAL (operands[2]) < 0x8000)
	{
	  if (TUNE_68040)
	    return "add%.w %2,%0";
	  else
	    return MOTOROLA ? "lea (%c2,%0),%0" : "lea %0@(%c2),%0";
	}
    }
  return "add%.l %2,%0";
}
/* Emit a comparison between OP0 and OP1.  Return true iff the comparison
   was reversed.  SC1 is an SImode scratch reg, and SC2 a DImode scratch reg,
   as needed.  CODE is the code of the comparison, we return it unchanged or
   swapped, as necessary.  */
rtx_code
m68k_output_compare_di (rtx op0, rtx op1, rtx sc1, rtx sc2, rtx_insn *insn,
			rtx_code code)
{
  rtx ops[4];
  ops[0] = op0;
  ops[1] = op1;
  ops[2] = sc1;
  ops[3] = sc2;
  if (op1 == const0_rtx)
    {
      /* Comparison against zero: implemented by negating a copy (or,
	 if OP0 dies here, OP0 itself) so the flags reflect 0 - OP0;
	 hence the swapped condition code on those paths.  */
      if (!REG_P (op0) || ADDRESS_REG_P (op0))
	{
	  rtx xoperands[2];

	  xoperands[0] = sc2;
	  xoperands[1] = op0;
	  output_move_double (xoperands);
	  output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", xoperands);
	  return swap_condition (code);
	}
      if (find_reg_note (insn, REG_DEAD, op0))
	{
	  /* OP0 is dead after this insn, so it is safe to clobber it.  */
	  output_asm_insn ("neg%.l %R0\n\tnegx%.l %0", ops);
	  return swap_condition (code);
	}
      else
	{
	  /* 'sub' clears %1, and also clears the X cc bit.
	     'tst' sets the Z cc bit according to the low part of the DImode
	     operand.
	     'subx %1' (i.e. subx #0) acts as a (non-existent) tstx on the high
	     part.  */
	  output_asm_insn ("sub%.l %2,%2\n\ttst%.l %R0\n\tsubx%.l %2,%0", ops);
	  return code;
	}
    }

  /* General case: subtract one operand from a (scratch) copy of the
     other, word by word with subx carrying between halves.  */
  if (rtx_equal_p (sc2, op0))
    {
      output_asm_insn ("sub%.l %R1,%R3\n\tsubx%.l %1,%3", ops);
      return code;
    }
  else
    {
      output_asm_insn ("sub%.l %R0,%R3\n\tsubx%.l %0,%3", ops);
      return swap_condition (code);
    }
}
4425 static void
4426 remember_compare_flags (rtx op0, rtx op1)
4428 if (side_effects_p (op0) || side_effects_p (op1))
4429 CC_STATUS_INIT;
4430 else
4432 flags_compare_op0 = op0;
4433 flags_compare_op1 = op1;
4434 flags_operand1 = flags_operand2 = NULL_RTX;
4435 flags_valid = FLAGS_VALID_SET;
4439 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4440 comparison. It is returned, potentially modified if necessary. */
4441 rtx_code
4442 m68k_output_compare_si (rtx op0, rtx op1, rtx_code code)
4444 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4445 if (tmp != UNKNOWN)
4446 return tmp;
4448 remember_compare_flags (op0, op1);
4450 rtx ops[2];
4451 ops[0] = op0;
4452 ops[1] = op1;
4453 if (op1 == const0_rtx && (TARGET_68020 || TARGET_COLDFIRE || !ADDRESS_REG_P (op0)))
4454 output_asm_insn ("tst%.l %0", ops);
4455 else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
4456 output_asm_insn ("cmpm%.l %1,%0", ops);
4457 else if (REG_P (op1)
4458 || (!REG_P (op0) && GET_CODE (op0) != MEM))
4460 output_asm_insn ("cmp%.l %d0,%d1", ops);
4461 std::swap (flags_compare_op0, flags_compare_op1);
4462 return swap_condition (code);
4464 else if (!TARGET_COLDFIRE
4465 && ADDRESS_REG_P (op0)
4466 && GET_CODE (op1) == CONST_INT
4467 && INTVAL (op1) < 0x8000
4468 && INTVAL (op1) >= -0x8000)
4469 output_asm_insn ("cmp%.w %1,%0", ops);
4470 else
4471 output_asm_insn ("cmp%.l %d1,%d0", ops);
4472 return code;
4475 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4476 comparison. It is returned, potentially modified if necessary. */
4477 rtx_code
4478 m68k_output_compare_hi (rtx op0, rtx op1, rtx_code code)
4480 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4481 if (tmp != UNKNOWN)
4482 return tmp;
4484 remember_compare_flags (op0, op1);
4486 rtx ops[2];
4487 ops[0] = op0;
4488 ops[1] = op1;
4489 if (op1 == const0_rtx)
4490 output_asm_insn ("tst%.w %d0", ops);
4491 else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
4492 output_asm_insn ("cmpm%.w %1,%0", ops);
4493 else if ((REG_P (op1) && !ADDRESS_REG_P (op1))
4494 || (!REG_P (op0) && GET_CODE (op0) != MEM))
4496 output_asm_insn ("cmp%.w %d0,%d1", ops);
4497 std::swap (flags_compare_op0, flags_compare_op1);
4498 return swap_condition (code);
4500 else
4501 output_asm_insn ("cmp%.w %d1,%d0", ops);
4502 return code;
4505 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4506 comparison. It is returned, potentially modified if necessary. */
4507 rtx_code
4508 m68k_output_compare_qi (rtx op0, rtx op1, rtx_code code)
4510 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4511 if (tmp != UNKNOWN)
4512 return tmp;
4514 remember_compare_flags (op0, op1);
4516 rtx ops[2];
4517 ops[0] = op0;
4518 ops[1] = op1;
4519 if (op1 == const0_rtx)
4520 output_asm_insn ("tst%.b %d0", ops);
4521 else if (GET_CODE (op0) == MEM && GET_CODE (op1) == MEM)
4522 output_asm_insn ("cmpm%.b %1,%0", ops);
4523 else if (REG_P (op1) || (!REG_P (op0) && GET_CODE (op0) != MEM))
4525 output_asm_insn ("cmp%.b %d0,%d1", ops);
4526 std::swap (flags_compare_op0, flags_compare_op1);
4527 return swap_condition (code);
4529 else
4530 output_asm_insn ("cmp%.b %d1,%d0", ops);
4531 return code;
4534 /* Emit a comparison between OP0 and OP1. CODE is the code of the
4535 comparison. It is returned, potentially modified if necessary. */
4536 rtx_code
4537 m68k_output_compare_fp (rtx op0, rtx op1, rtx_code code)
4539 rtx_code tmp = m68k_find_flags_value (op0, op1, code);
4540 if (tmp != UNKNOWN)
4541 return tmp;
4543 rtx ops[2];
4544 ops[0] = op0;
4545 ops[1] = op1;
4547 remember_compare_flags (op0, op1);
4549 machine_mode mode = GET_MODE (op0);
4550 std::string prec = mode == SFmode ? "s" : mode == DFmode ? "d" : "x";
4552 if (op1 == CONST0_RTX (GET_MODE (op0)))
4554 if (FP_REG_P (op0))
4556 if (TARGET_COLDFIRE_FPU)
4557 output_asm_insn ("ftst%.d %0", ops);
4558 else
4559 output_asm_insn ("ftst%.x %0", ops);
4561 else
4562 output_asm_insn (("ftst%." + prec + " %0").c_str (), ops);
4563 return code;
4566 switch (which_alternative)
4568 case 0:
4569 if (TARGET_COLDFIRE_FPU)
4570 output_asm_insn ("fcmp%.d %1,%0", ops);
4571 else
4572 output_asm_insn ("fcmp%.x %1,%0", ops);
4573 break;
4574 case 1:
4575 output_asm_insn (("fcmp%." + prec + " %f1,%0").c_str (), ops);
4576 break;
4577 case 2:
4578 output_asm_insn (("fcmp%." + prec + " %0,%f1").c_str (), ops);
4579 std::swap (flags_compare_op0, flags_compare_op1);
4580 return swap_condition (code);
4581 case 3:
4582 /* This is the ftst case, handled earlier. */
4583 gcc_unreachable ();
4585 return code;
4588 /* Return an output template for a branch with CODE. */
4589 const char *
4590 m68k_output_branch_integer (rtx_code code)
4592 switch (code)
4594 case EQ:
4595 return "jeq %l3";
4596 case NE:
4597 return "jne %l3";
4598 case GT:
4599 return "jgt %l3";
4600 case GTU:
4601 return "jhi %l3";
4602 case LT:
4603 return "jlt %l3";
4604 case LTU:
4605 return "jcs %l3";
4606 case GE:
4607 return "jge %l3";
4608 case GEU:
4609 return "jcc %l3";
4610 case LE:
4611 return "jle %l3";
4612 case LEU:
4613 return "jls %l3";
4614 case PLUS:
4615 return "jpl %l3";
4616 case MINUS:
4617 return "jmi %l3";
4618 default:
4619 gcc_unreachable ();
4623 /* Return an output template for a reversed branch with CODE. */
4624 const char *
4625 m68k_output_branch_integer_rev (rtx_code code)
4627 switch (code)
4629 case EQ:
4630 return "jne %l3";
4631 case NE:
4632 return "jeq %l3";
4633 case GT:
4634 return "jle %l3";
4635 case GTU:
4636 return "jls %l3";
4637 case LT:
4638 return "jge %l3";
4639 case LTU:
4640 return "jcc %l3";
4641 case GE:
4642 return "jlt %l3";
4643 case GEU:
4644 return "jcs %l3";
4645 case LE:
4646 return "jgt %l3";
4647 case LEU:
4648 return "jhi %l3";
4649 case PLUS:
4650 return "jmi %l3";
4651 case MINUS:
4652 return "jpl %l3";
4653 default:
4654 gcc_unreachable ();
4658 /* Return an output template for a scc instruction with CODE. */
4659 const char *
4660 m68k_output_scc (rtx_code code)
4662 switch (code)
4664 case EQ:
4665 return "seq %0";
4666 case NE:
4667 return "sne %0";
4668 case GT:
4669 return "sgt %0";
4670 case GTU:
4671 return "shi %0";
4672 case LT:
4673 return "slt %0";
4674 case LTU:
4675 return "scs %0";
4676 case GE:
4677 return "sge %0";
4678 case GEU:
4679 return "scc %0";
4680 case LE:
4681 return "sle %0";
4682 case LEU:
4683 return "sls %0";
4684 case PLUS:
4685 return "spl %0";
4686 case MINUS:
4687 return "smi %0";
4688 default:
4689 gcc_unreachable ();
4693 /* Return an output template for a floating point branch
4694 instruction with CODE. */
4695 const char *
4696 m68k_output_branch_float (rtx_code code)
4698 switch (code)
4700 case EQ:
4701 return "fjeq %l3";
4702 case NE:
4703 return "fjne %l3";
4704 case GT:
4705 return "fjgt %l3";
4706 case LT:
4707 return "fjlt %l3";
4708 case GE:
4709 return "fjge %l3";
4710 case LE:
4711 return "fjle %l3";
4712 case ORDERED:
4713 return "fjor %l3";
4714 case UNORDERED:
4715 return "fjun %l3";
4716 case UNEQ:
4717 return "fjueq %l3";
4718 case UNGE:
4719 return "fjuge %l3";
4720 case UNGT:
4721 return "fjugt %l3";
4722 case UNLE:
4723 return "fjule %l3";
4724 case UNLT:
4725 return "fjult %l3";
4726 case LTGT:
4727 return "fjogl %l3";
4728 default:
4729 gcc_unreachable ();
4733 /* Return an output template for a reversed floating point branch
4734 instruction with CODE. */
4735 const char *
4736 m68k_output_branch_float_rev (rtx_code code)
4738 switch (code)
4740 case EQ:
4741 return "fjne %l3";
4742 case NE:
4743 return "fjeq %l3";
4744 case GT:
4745 return "fjngt %l3";
4746 case LT:
4747 return "fjnlt %l3";
4748 case GE:
4749 return "fjnge %l3";
4750 case LE:
4751 return "fjnle %l3";
4752 case ORDERED:
4753 return "fjun %l3";
4754 case UNORDERED:
4755 return "fjor %l3";
4756 case UNEQ:
4757 return "fjogl %l3";
4758 case UNGE:
4759 return "fjolt %l3";
4760 case UNGT:
4761 return "fjole %l3";
4762 case UNLE:
4763 return "fjogt %l3";
4764 case UNLT:
4765 return "fjoge %l3";
4766 case LTGT:
4767 return "fjueq %l3";
4768 default:
4769 gcc_unreachable ();
4773 /* Return an output template for a floating point scc
4774 instruction with CODE. */
4775 const char *
4776 m68k_output_scc_float (rtx_code code)
4778 switch (code)
4780 case EQ:
4781 return "fseq %0";
4782 case NE:
4783 return "fsne %0";
4784 case GT:
4785 return "fsgt %0";
4786 case GTU:
4787 return "fshi %0";
4788 case LT:
4789 return "fslt %0";
4790 case GE:
4791 return "fsge %0";
4792 case LE:
4793 return "fsle %0";
4794 case ORDERED:
4795 return "fsor %0";
4796 case UNORDERED:
4797 return "fsun %0";
4798 case UNEQ:
4799 return "fsueq %0";
4800 case UNGE:
4801 return "fsuge %0";
4802 case UNGT:
4803 return "fsugt %0";
4804 case UNLE:
4805 return "fsule %0";
4806 case UNLT:
4807 return "fsult %0";
4808 case LTGT:
4809 return "fsogl %0";
4810 default:
4811 gcc_unreachable ();
4815 const char *
4816 output_move_const_double (rtx *operands)
4818 int code = standard_68881_constant_p (operands[1]);
4820 if (code != 0)
4822 static char buf[40];
4824 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4825 return buf;
4827 return "fmove%.d %1,%0";
4830 const char *
4831 output_move_const_single (rtx *operands)
4833 int code = standard_68881_constant_p (operands[1]);
4835 if (code != 0)
4837 static char buf[40];
4839 sprintf (buf, "fmovecr #0x%x,%%0", code & 0xff);
4840 return buf;
4842 return "fmove%.s %f1,%0";
4845 /* Return nonzero if X, a CONST_DOUBLE, has a value that we can get
4846 from the "fmovecr" instruction.
4847 The value, anded with 0xff, gives the code to use in fmovecr
4848 to get the desired constant. */
4850 /* This code has been fixed for cross-compilation. */
4852 static int inited_68881_table = 0;
4854 static const char *const strings_68881[7] = {
4855 "0.0",
4856 "1.0",
4857 "10.0",
4858 "100.0",
4859 "10000.0",
4860 "1e8",
4861 "1e16"
4864 static const int codes_68881[7] = {
4865 0x0f,
4866 0x32,
4867 0x33,
4868 0x34,
4869 0x35,
4870 0x36,
4871 0x37
4874 REAL_VALUE_TYPE values_68881[7];
4876 /* Set up values_68881 array by converting the decimal values
4877 strings_68881 to binary. */
4879 void
4880 init_68881_table (void)
4882 int i;
4883 REAL_VALUE_TYPE r;
4884 machine_mode mode;
4886 mode = SFmode;
4887 for (i = 0; i < 7; i++)
4889 if (i == 6)
4890 mode = DFmode;
4891 r = REAL_VALUE_ATOF (strings_68881[i], mode);
4892 values_68881[i] = r;
4894 inited_68881_table = 1;
4898 standard_68881_constant_p (rtx x)
4900 const REAL_VALUE_TYPE *r;
4901 int i;
4903 /* fmovecr must be emulated on the 68040 and 68060, so it shouldn't be
4904 used at all on those chips. */
4905 if (TUNE_68040_60)
4906 return 0;
4908 if (! inited_68881_table)
4909 init_68881_table ();
4911 r = CONST_DOUBLE_REAL_VALUE (x);
4913 /* Use real_identical instead of real_equal so that -0.0 is rejected. */
4914 for (i = 0; i < 6; i++)
4916 if (real_identical (r, &values_68881[i]))
4917 return (codes_68881[i]);
4920 if (GET_MODE (x) == SFmode)
4921 return 0;
4923 if (real_equal (r, &values_68881[6]))
4924 return (codes_68881[6]);
4926 /* larger powers of ten in the constants ram are not used
4927 because they are not equal to a `double' C constant. */
4928 return 0;
4931 /* If X is a floating-point constant, return the logarithm of X base 2,
4932 or 0 if X is not a power of 2. */
4935 floating_exact_log2 (rtx x)
4937 const REAL_VALUE_TYPE *r;
4938 REAL_VALUE_TYPE r1;
4939 int exp;
4941 r = CONST_DOUBLE_REAL_VALUE (x);
4943 if (real_less (r, &dconst1))
4944 return 0;
4946 exp = real_exponent (r);
4947 real_2expN (&r1, exp, DFmode);
4948 if (real_equal (&r1, r))
4949 return exp;
4951 return 0;
4954 /* A C compound statement to output to stdio stream STREAM the
4955 assembler syntax for an instruction operand X. X is an RTL
4956 expression.
4958 CODE is a value that can be used to specify one of several ways
4959 of printing the operand. It is used when identical operands
4960 must be printed differently depending on the context. CODE
4961 comes from the `%' specification that was used to request
4962 printing of the operand. If the specification was just `%DIGIT'
4963 then CODE is 0; if the specification was `%LTR DIGIT' then CODE
4964 is the ASCII code for LTR.
4966 If X is a register, this macro should print the register's name.
4967 The names can be found in an array `reg_names' whose type is
4968 `char *[]'. `reg_names' is initialized from `REGISTER_NAMES'.
4970 When the machine description has a specification `%PUNCT' (a `%'
4971 followed by a punctuation character), this macro is called with
4972 a null pointer for X and the punctuation character for CODE.
4974 The m68k specific codes are:
4976 '.' for dot needed in Motorola-style opcode names.
4977 '-' for an operand pushing on the stack:
4978 sp@-, -(sp) or -(%sp) depending on the style of syntax.
4979 '+' for an operand pushing on the stack:
4980 sp@+, (sp)+ or (%sp)+ depending on the style of syntax.
4981 '@' for a reference to the top word on the stack:
4982 sp@, (sp) or (%sp) depending on the style of syntax.
4983 '#' for an immediate operand prefix (# in MIT and Motorola syntax
4984 but & in SGS syntax).
4985 '!' for the cc register (used in an `and to cc' insn).
4986 '$' for the letter `s' in an op code, but only on the 68040.
4987 '&' for the letter `d' in an op code, but only on the 68040.
4988 '/' for register prefix needed by longlong.h.
4989 '?' for m68k_library_id_string
4991 'b' for byte insn (no effect, on the Sun; this is for the ISI).
4992 'd' to force memory addressing to be absolute, not relative.
4993 'f' for float insn (print a CONST_DOUBLE as a float rather than in hex)
4994 'x' for float insn (print a CONST_DOUBLE as a float rather than in hex),
4995 or print pair of registers as rx:ry.
4996 'p' print an address with @PLTPC attached, but only if the operand
4997 is not locally-bound. */
4999 void
5000 print_operand (FILE *file, rtx op, int letter)
5002 if (op != NULL_RTX)
5003 m68k_adjust_decorated_operand (op);
5005 if (letter == '.')
5007 if (MOTOROLA)
5008 fprintf (file, ".");
5010 else if (letter == '#')
5011 asm_fprintf (file, "%I");
5012 else if (letter == '-')
5013 asm_fprintf (file, MOTOROLA ? "-(%Rsp)" : "%Rsp@-");
5014 else if (letter == '+')
5015 asm_fprintf (file, MOTOROLA ? "(%Rsp)+" : "%Rsp@+");
5016 else if (letter == '@')
5017 asm_fprintf (file, MOTOROLA ? "(%Rsp)" : "%Rsp@");
5018 else if (letter == '!')
5019 asm_fprintf (file, "%Rfpcr");
5020 else if (letter == '$')
5022 if (TARGET_68040)
5023 fprintf (file, "s");
5025 else if (letter == '&')
5027 if (TARGET_68040)
5028 fprintf (file, "d");
5030 else if (letter == '/')
5031 asm_fprintf (file, "%R");
5032 else if (letter == '?')
5033 asm_fprintf (file, m68k_library_id_string);
5034 else if (letter == 'p')
5036 output_addr_const (file, op);
5037 if (!(GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_LOCAL_P (op)))
5038 fprintf (file, "@PLTPC");
5040 else if (GET_CODE (op) == REG)
5042 if (letter == 'R')
5043 /* Print out the second register name of a register pair.
5044 I.e., R (6) => 7. */
5045 fputs (M68K_REGNAME(REGNO (op) + 1), file);
5046 else
5047 fputs (M68K_REGNAME(REGNO (op)), file);
5049 else if (GET_CODE (op) == MEM)
5051 output_address (GET_MODE (op), XEXP (op, 0));
5052 if (letter == 'd' && ! TARGET_68020
5053 && CONSTANT_ADDRESS_P (XEXP (op, 0))
5054 && !(GET_CODE (XEXP (op, 0)) == CONST_INT
5055 && INTVAL (XEXP (op, 0)) < 0x8000
5056 && INTVAL (XEXP (op, 0)) >= -0x8000))
5057 fprintf (file, MOTOROLA ? ".l" : ":l");
5059 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == SFmode)
5061 long l;
5062 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5063 asm_fprintf (file, "%I0x%lx", l & 0xFFFFFFFF);
5065 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == XFmode)
5067 long l[3];
5068 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5069 asm_fprintf (file, "%I0x%lx%08lx%08lx", l[0] & 0xFFFFFFFF,
5070 l[1] & 0xFFFFFFFF, l[2] & 0xFFFFFFFF);
5072 else if (GET_CODE (op) == CONST_DOUBLE && GET_MODE (op) == DFmode)
5074 long l[2];
5075 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
5076 asm_fprintf (file, "%I0x%lx%08lx", l[0] & 0xFFFFFFFF, l[1] & 0xFFFFFFFF);
5078 else
5080 /* Use `print_operand_address' instead of `output_addr_const'
5081 to ensure that we print relevant PIC stuff. */
5082 asm_fprintf (file, "%I");
5083 if (TARGET_PCREL
5084 && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST))
5085 print_operand_address (file, op);
5086 else
5087 output_addr_const (file, op);
5091 /* Return string for TLS relocation RELOC. */
5093 static const char *
5094 m68k_get_reloc_decoration (enum m68k_reloc reloc)
5096 /* To my knowledge, !MOTOROLA assemblers don't support TLS. */
5097 gcc_assert (MOTOROLA || reloc == RELOC_GOT);
5099 switch (reloc)
5101 case RELOC_GOT:
5102 if (MOTOROLA)
5104 if (flag_pic == 1 && TARGET_68020)
5105 return "@GOT.w";
5106 else
5107 return "@GOT";
5109 else
5111 if (TARGET_68020)
5113 switch (flag_pic)
5115 case 1:
5116 return ":w";
5117 case 2:
5118 return ":l";
5119 default:
5120 return "";
5124 gcc_unreachable ();
5126 case RELOC_TLSGD:
5127 return "@TLSGD";
5129 case RELOC_TLSLDM:
5130 return "@TLSLDM";
5132 case RELOC_TLSLDO:
5133 return "@TLSLDO";
5135 case RELOC_TLSIE:
5136 return "@TLSIE";
5138 case RELOC_TLSLE:
5139 return "@TLSLE";
5141 default:
5142 gcc_unreachable ();
5146 /* m68k implementation of TARGET_OUTPUT_ADDR_CONST_EXTRA. */
5148 static bool
5149 m68k_output_addr_const_extra (FILE *file, rtx x)
5151 if (GET_CODE (x) == UNSPEC)
5153 switch (XINT (x, 1))
5155 case UNSPEC_RELOC16:
5156 case UNSPEC_RELOC32:
5157 output_addr_const (file, XVECEXP (x, 0, 0));
5158 fputs (m68k_get_reloc_decoration
5159 ((enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1))), file);
5160 return true;
5162 default:
5163 break;
5167 return false;
5170 /* M68K implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
5172 static void
5173 m68k_output_dwarf_dtprel (FILE *file, int size, rtx x)
5175 gcc_assert (size == 4);
5176 fputs ("\t.long\t", file);
5177 output_addr_const (file, x);
5178 fputs ("@TLSLDO+0x8000", file);
5181 /* In the name of slightly smaller debug output, and to cater to
5182 general assembler lossage, recognize various UNSPEC sequences
5183 and turn them back into a direct symbol reference. */
5185 static rtx
5186 m68k_delegitimize_address (rtx orig_x)
5188 rtx x;
5189 struct m68k_address addr;
5190 rtx unspec;
5192 orig_x = delegitimize_mem_from_attrs (orig_x);
5193 x = orig_x;
5194 if (MEM_P (x))
5195 x = XEXP (x, 0);
5197 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
5198 return orig_x;
5200 if (!m68k_decompose_address (GET_MODE (x), x, false, &addr)
5201 || addr.offset == NULL_RTX
5202 || GET_CODE (addr.offset) != CONST)
5203 return orig_x;
5205 unspec = XEXP (addr.offset, 0);
5206 if (GET_CODE (unspec) == PLUS && CONST_INT_P (XEXP (unspec, 1)))
5207 unspec = XEXP (unspec, 0);
5208 if (GET_CODE (unspec) != UNSPEC
5209 || (XINT (unspec, 1) != UNSPEC_RELOC16
5210 && XINT (unspec, 1) != UNSPEC_RELOC32))
5211 return orig_x;
5212 x = XVECEXP (unspec, 0, 0);
5213 gcc_assert (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF);
5214 if (unspec != XEXP (addr.offset, 0))
5215 x = gen_rtx_PLUS (Pmode, x, XEXP (XEXP (addr.offset, 0), 1));
5216 if (addr.index)
5218 rtx idx = addr.index;
5219 if (addr.scale != 1)
5220 idx = gen_rtx_MULT (Pmode, idx, GEN_INT (addr.scale));
5221 x = gen_rtx_PLUS (Pmode, idx, x);
5223 if (addr.base)
5224 x = gen_rtx_PLUS (Pmode, addr.base, x);
5225 if (MEM_P (orig_x))
5226 x = replace_equiv_address_nv (orig_x, x);
5227 return x;
5231 /* A C compound statement to output to stdio stream STREAM the
5232 assembler syntax for an instruction operand that is a memory
5233 reference whose address is ADDR. ADDR is an RTL expression.
5235 Note that this contains a kludge that knows that the only reason
5236 we have an address (plus (label_ref...) (reg...)) when not generating
5237 PIC code is in the insn before a tablejump, and we know that m68k.md
5238 generates a label LInnn: on such an insn.
5240 It is possible for PIC to generate a (plus (label_ref...) (reg...))
5241 and we handle that just like we would a (plus (symbol_ref...) (reg...)).
5243 This routine is responsible for distinguishing between -fpic and -fPIC
5244 style relocations in an address. When generating -fpic code the
5245 offset is output in word mode (e.g. movel a5@(_foo:w), a0). When generating
5246 -fPIC code the offset is output in long mode (e.g. movel a5@(_foo:l), a0) */
5248 void
5249 print_operand_address (FILE *file, rtx addr)
5251 struct m68k_address address;
5253 m68k_adjust_decorated_operand (addr);
5255 if (!m68k_decompose_address (QImode, addr, true, &address))
5256 gcc_unreachable ();
5258 if (address.code == PRE_DEC)
5259 fprintf (file, MOTOROLA ? "-(%s)" : "%s@-",
5260 M68K_REGNAME (REGNO (address.base)));
5261 else if (address.code == POST_INC)
5262 fprintf (file, MOTOROLA ? "(%s)+" : "%s@+",
5263 M68K_REGNAME (REGNO (address.base)));
5264 else if (!address.base && !address.index)
5266 /* A constant address. */
5267 gcc_assert (address.offset == addr);
5268 if (GET_CODE (addr) == CONST_INT)
5270 /* (xxx).w or (xxx).l. */
5271 if (IN_RANGE (INTVAL (addr), -0x8000, 0x7fff))
5272 fprintf (file, MOTOROLA ? "%d.w" : "%d:w", (int) INTVAL (addr));
5273 else
5274 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (addr));
5276 else if (TARGET_PCREL)
5278 /* (d16,PC) or (bd,PC,Xn) (with suppressed index register). */
5279 fputc ('(', file);
5280 output_addr_const (file, addr);
5281 asm_fprintf (file, flag_pic == 1 ? ":w,%Rpc)" : ":l,%Rpc)");
5283 else
5285 /* (xxx).l. We need a special case for SYMBOL_REF if the symbol
5286 name ends in `.<letter>', as the last 2 characters can be
5287 mistaken as a size suffix. Put the name in parentheses. */
5288 if (GET_CODE (addr) == SYMBOL_REF
5289 && strlen (XSTR (addr, 0)) > 2
5290 && XSTR (addr, 0)[strlen (XSTR (addr, 0)) - 2] == '.')
5292 putc ('(', file);
5293 output_addr_const (file, addr);
5294 putc (')', file);
5296 else
5297 output_addr_const (file, addr);
5300 else
5302 int labelno;
5304 /* If ADDR is a (d8,pc,Xn) address, this is the number of the
5305 label being accessed, otherwise it is -1. */
5306 labelno = (address.offset
5307 && !address.base
5308 && GET_CODE (address.offset) == LABEL_REF
5309 ? CODE_LABEL_NUMBER (XEXP (address.offset, 0))
5310 : -1);
5311 if (MOTOROLA)
5313 /* Print the "offset(base" component. */
5314 if (labelno >= 0)
5315 asm_fprintf (file, "%LL%d(%Rpc,", labelno);
5316 else
5318 if (address.offset)
5319 output_addr_const (file, address.offset);
5321 putc ('(', file);
5322 if (address.base)
5323 fputs (M68K_REGNAME (REGNO (address.base)), file);
5325 /* Print the ",index" component, if any. */
5326 if (address.index)
5328 if (address.base)
5329 putc (',', file);
5330 fprintf (file, "%s.%c",
5331 M68K_REGNAME (REGNO (address.index)),
5332 GET_MODE (address.index) == HImode ? 'w' : 'l');
5333 if (address.scale != 1)
5334 fprintf (file, "*%d", address.scale);
5336 putc (')', file);
5338 else /* !MOTOROLA */
5340 if (!address.offset && !address.index)
5341 fprintf (file, "%s@", M68K_REGNAME (REGNO (address.base)));
5342 else
5344 /* Print the "base@(offset" component. */
5345 if (labelno >= 0)
5346 asm_fprintf (file, "%Rpc@(%LL%d", labelno);
5347 else
5349 if (address.base)
5350 fputs (M68K_REGNAME (REGNO (address.base)), file);
5351 fprintf (file, "@(");
5352 if (address.offset)
5353 output_addr_const (file, address.offset);
5355 /* Print the ",index" component, if any. */
5356 if (address.index)
5358 fprintf (file, ",%s:%c",
5359 M68K_REGNAME (REGNO (address.index)),
5360 GET_MODE (address.index) == HImode ? 'w' : 'l');
5361 if (address.scale != 1)
5362 fprintf (file, ":%d", address.scale);
5364 putc (')', file);
5370 /* Check for cases where a clr insns can be omitted from code using
5371 strict_low_part sets. For example, the second clrl here is not needed:
5372 clrl d0; movw a0@+,d0; use d0; clrl d0; movw a0@+; use d0; ...
5374 MODE is the mode of this STRICT_LOW_PART set. FIRST_INSN is the clear
5375 insn we are checking for redundancy. TARGET is the register set by the
5376 clear insn. */
5378 bool
5379 strict_low_part_peephole_ok (machine_mode mode, rtx_insn *first_insn,
5380 rtx target)
5382 rtx_insn *p = first_insn;
5384 while ((p = PREV_INSN (p)))
5386 if (NOTE_INSN_BASIC_BLOCK_P (p))
5387 return false;
5389 if (NOTE_P (p))
5390 continue;
5392 /* If it isn't an insn, then give up. */
5393 if (!INSN_P (p))
5394 return false;
5396 if (reg_set_p (target, p))
5398 rtx set = single_set (p);
5399 rtx dest;
5401 /* If it isn't an easy to recognize insn, then give up. */
5402 if (! set)
5403 return false;
5405 dest = SET_DEST (set);
5407 /* If this sets the entire target register to zero, then our
5408 first_insn is redundant. */
5409 if (rtx_equal_p (dest, target)
5410 && SET_SRC (set) == const0_rtx)
5411 return true;
5412 else if (GET_CODE (dest) == STRICT_LOW_PART
5413 && GET_CODE (XEXP (dest, 0)) == REG
5414 && REGNO (XEXP (dest, 0)) == REGNO (target)
5415 && (GET_MODE_SIZE (GET_MODE (XEXP (dest, 0)))
5416 <= GET_MODE_SIZE (mode)))
5417 /* This is a strict low part set which modifies less than
5418 we are using, so it is safe. */
5420 else
5421 return false;
5425 return false;
5428 /* Operand predicates for implementing asymmetric pc-relative addressing
5429 on m68k. The m68k supports pc-relative addressing (mode 7, register 2)
5430 when used as a source operand, but not as a destination operand.
5432 We model this by restricting the meaning of the basic predicates
5433 (general_operand, memory_operand, etc) to forbid the use of this
5434 addressing mode, and then define the following predicates that permit
5435 this addressing mode. These predicates can then be used for the
5436 source operands of the appropriate instructions.
   n.b. While it is theoretically possible to change all machine patterns
   to use this addressing mode where permitted by the architecture,
   it has only been implemented for "common" cases: SImode, HImode, and
   QImode operands, and only for the principal operations that would
   require this addressing mode: data movement and simple integer operations.
5444 In parallel with these new predicates, two new constraint letters
5445 were defined: 'S' and 'T'. 'S' is the -mpcrel analog of 'm'.
5446 'T' replaces 's' in the non-pcrel case. It is a no-op in the pcrel case.
5447 In the pcrel case 's' is only valid in combination with 'a' registers.
5448 See addsi3, subsi3, cmpsi, and movsi patterns for a better understanding
5449 of how these constraints are used.
5451 The use of these predicates is strictly optional, though patterns that
5452 don't will cause an extra reload register to be allocated where one
5453 was not necessary:
5455 lea (abc:w,%pc),%a0 ; need to reload address
5456 moveq &1,%d1 ; since write to pc-relative space
5457 movel %d1,%a0@ ; is not allowed
5459 lea (abc:w,%pc),%a1 ; no need to reload address here
5460 movel %a1@,%d0 ; since "movel (abc:w,%pc),%d0" is ok
5462 For more info, consult tiemann@cygnus.com.
5465 All of the ugliness with predicates and constraints is due to the
5466 simple fact that the m68k does not allow a pc-relative addressing
5467 mode as a destination. gcc does not distinguish between source and
5468 destination addresses. Hence, if we claim that pc-relative address
5469 modes are valid, e.g. TARGET_LEGITIMATE_ADDRESS_P accepts them, then we
5470 end up with invalid code. To get around this problem, we left
5471 pc-relative modes as invalid addresses, and then added special
5472 predicates and constraints to accept them.
5474 A cleaner way to handle this is to modify gcc to distinguish
5475 between source and destination addresses. We can then say that
5476 pc-relative is a valid source address but not a valid destination
5477 address, and hopefully avoid a lot of the predicate and constraint
5478 hackery. Unfortunately, this would be a pretty big change. It would
5479 be a useful change for a number of ports, but there aren't any current
5480 plans to undertake this.
5482 ***************************************************************************/
5485 const char *
5486 output_andsi3 (rtx *operands)
5488 int logval;
5489 CC_STATUS_INIT;
5490 if (GET_CODE (operands[2]) == CONST_INT
5491 && (INTVAL (operands[2]) | 0xffff) == -1
5492 && (DATA_REG_P (operands[0])
5493 || offsettable_memref_p (operands[0]))
5494 && !TARGET_COLDFIRE)
5496 if (GET_CODE (operands[0]) != REG)
5497 operands[0] = adjust_address (operands[0], HImode, 2);
5498 operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
5499 if (operands[2] == const0_rtx)
5500 return "clr%.w %0";
5501 return "and%.w %2,%0";
5503 if (GET_CODE (operands[2]) == CONST_INT
5504 && (logval = exact_log2 (~ INTVAL (operands[2]) & 0xffffffff)) >= 0
5505 && (DATA_REG_P (operands[0])
5506 || offsettable_memref_p (operands[0])))
5508 if (DATA_REG_P (operands[0]))
5509 operands[1] = GEN_INT (logval);
5510 else
5512 operands[0] = adjust_address (operands[0], QImode, 3 - (logval / 8));
5513 operands[1] = GEN_INT (logval % 8);
5515 return "bclr %1,%0";
5517 /* Only a standard logical operation on the whole word sets the
5518 condition codes in a way we can use. */
5519 if (!side_effects_p (operands[0]))
5520 flags_operand1 = operands[0];
5521 flags_valid = FLAGS_VALID_YES;
5522 return "and%.l %2,%0";
5525 const char *
5526 output_iorsi3 (rtx *operands)
5528 int logval;
5529 CC_STATUS_INIT;
5530 if (GET_CODE (operands[2]) == CONST_INT
5531 && INTVAL (operands[2]) >> 16 == 0
5532 && (DATA_REG_P (operands[0])
5533 || offsettable_memref_p (operands[0]))
5534 && !TARGET_COLDFIRE)
5536 if (GET_CODE (operands[0]) != REG)
5537 operands[0] = adjust_address (operands[0], HImode, 2);
5538 if (INTVAL (operands[2]) == 0xffff)
5539 return "mov%.w %2,%0";
5540 return "or%.w %2,%0";
5542 if (GET_CODE (operands[2]) == CONST_INT
5543 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5544 && (DATA_REG_P (operands[0])
5545 || offsettable_memref_p (operands[0])))
5547 if (DATA_REG_P (operands[0]))
5548 operands[1] = GEN_INT (logval);
5549 else
5551 operands[0] = adjust_address (operands[0], QImode, 3 - (logval / 8));
5552 operands[1] = GEN_INT (logval % 8);
5554 return "bset %1,%0";
5556 /* Only a standard logical operation on the whole word sets the
5557 condition codes in a way we can use. */
5558 if (!side_effects_p (operands[0]))
5559 flags_operand1 = operands[0];
5560 flags_valid = FLAGS_VALID_YES;
5561 return "or%.l %2,%0";
/* Emit the assembly for a 32-bit exclusive OR (operands[0] ^= operands[2]).
   Narrow constant cases are shrunk to eor.w / not.w or a single bchg;
   operands[1] may be overwritten with the bit number used by bchg.  */
5564 const char *
5565 output_xorsi3 (rtx *operands)
5567 int logval;
5568 CC_STATUS_INIT;
/* Constant with only the low 16 bits set: operate on the low word only.  */
5569 if (GET_CODE (operands[2]) == CONST_INT
5570 && INTVAL (operands[2]) >> 16 == 0
5571 && (offsettable_memref_p (operands[0]) || DATA_REG_P (operands[0]))
5572 && !TARGET_COLDFIRE)
5574 if (! DATA_REG_P (operands[0]))
5575 operands[0] = adjust_address (operands[0], HImode, 2);
5576 if (INTVAL (operands[2]) == 0xffff)
5577 return "not%.w %0";
5578 return "eor%.w %2,%0";
/* Single-bit constant: toggle just that bit.  For a memory destination,
   narrow the access to the byte containing the bit (big-endian layout).  */
5580 if (GET_CODE (operands[2]) == CONST_INT
5581 && (logval = exact_log2 (INTVAL (operands[2]) & 0xffffffff)) >= 0
5582 && (DATA_REG_P (operands[0])
5583 || offsettable_memref_p (operands[0])))
5585 if (DATA_REG_P (operands[0]))
5586 operands[1] = GEN_INT (logval);
5587 else
5589 operands[0] = adjust_address (operands[0], QImode, 3 - (logval / 8));
5590 operands[1] = GEN_INT (logval % 8);
5592 return "bchg %1,%0";
5594 /* Only a standard logical operation on the whole word sets the
5595 condition codes in a way we can use. */
5596 if (!side_effects_p (operands[0]))
5597 flags_operand1 = operands[0];
5598 flags_valid = FLAGS_VALID_YES;
5599 return "eor%.l %2,%0";
5602 /* Return the instruction that should be used for a call to address X,
5603 which is known to be in operand 0. */
5605 const char *
5606 output_call (rtx x)
5608 if (symbolic_operand (x, VOIDmode))
5609 return m68k_symbolic_call;
5610 else
5611 return "jsr %a0";
5614 /* Likewise sibling calls. */
5616 const char *
5617 output_sibcall (rtx x)
5619 if (symbolic_operand (x, VOIDmode))
5620 return m68k_symbolic_jump;
5621 else
5622 return "jmp %a0";
/* Worker for TARGET_ASM_OUTPUT_MI_THUNK: emit the assembly for a thunk
   that adjusts the incoming "this" pointer (stored at 4(%sp)) by DELTA,
   optionally by *(*this + VCALL_OFFSET) as well, and then tail-jumps to
   FUNCTION.  Runs with reload_completed temporarily forced to 1 so that
   post-reload RTL can be generated and finalized directly.  */
5625 static void
5626 m68k_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
5627 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
5628 tree function)
5630 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk));
5631 rtx this_slot, offset, addr, mem, tmp;
5632 rtx_insn *insn;
5634 /* Avoid clobbering the struct value reg by using the
5635 static chain reg as a temporary. */
5636 tmp = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
5638 /* Pretend to be a post-reload pass while generating rtl. */
5639 reload_completed = 1;
5641 /* The "this" pointer is stored at 4(%sp). */
5642 this_slot = gen_rtx_MEM (Pmode, plus_constant (Pmode,
5643 stack_pointer_rtx, 4));
5645 /* Add DELTA to THIS. */
5646 if (delta != 0)
5648 /* Make the offset a legitimate operand for memory addition. */
5649 offset = GEN_INT (delta);
/* Large deltas are staged through %d0 rather than used as immediates.  */
5650 if ((delta < -8 || delta > 8)
5651 && (TARGET_COLDFIRE || USE_MOVQ (delta)))
5653 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), offset);
5654 offset = gen_rtx_REG (Pmode, D0_REG);
5656 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5657 copy_rtx (this_slot), offset));
5660 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
5661 if (vcall_offset != 0)
5663 /* Set the static chain register to *THIS. */
5664 emit_move_insn (tmp, this_slot)
5665 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
5667 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
5668 addr = plus_constant (Pmode, tmp, vcall_offset);
5669 if (!m68k_legitimate_address_p (Pmode, addr, true))
5671 emit_insn (gen_rtx_SET (tmp, addr));
5672 addr = tmp;
5675 /* Load the offset into %d0 and add it to THIS. */
5676 emit_move_insn (gen_rtx_REG (Pmode, D0_REG),
5677 gen_rtx_MEM (Pmode, addr));
5678 emit_insn (gen_add3_insn (copy_rtx (this_slot),
5679 copy_rtx (this_slot),
5680 gen_rtx_REG (Pmode, D0_REG)));
5683 /* Jump to the target function. Use a sibcall if direct jumps are
5684 allowed, otherwise load the address into a register first. */
5685 mem = DECL_RTL (function);
5686 if (!sibcall_operand (XEXP (mem, 0), VOIDmode))
5688 gcc_assert (flag_pic);
5690 if (!TARGET_SEP_DATA)
5692 /* Use the static chain register as a temporary (call-clobbered)
5693 GOT pointer for this function. We can use the static chain
5694 register because it isn't live on entry to the thunk. */
5695 SET_REGNO (pic_offset_table_rtx, STATIC_CHAIN_REGNUM);
5696 emit_insn (gen_load_got (pic_offset_table_rtx));
5698 legitimize_pic_address (XEXP (mem, 0), Pmode, tmp);
5699 mem = replace_equiv_address (mem, tmp);
5701 insn = emit_call_insn (gen_sibcall (mem, const0_rtx));
5702 SIBLING_CALL_P (insn) = 1;
5704 /* Run just enough of rest_of_compilation. */
5705 insn = get_insns ();
5706 split_all_insns_noflow ();
5707 assemble_start_function (thunk, fnname);
5708 final_start_function (insn, file, 1);
5709 final (insn, file, 1);
5710 final_end_function ();
5711 assemble_end_function (thunk, fnname);
5713 /* Clean up the vars set above. */
5714 reload_completed = 0;
5716 /* Restore the original PIC register. */
5717 if (flag_pic)
5718 SET_REGNO (pic_offset_table_rtx, PIC_REG);
5721 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
5723 static rtx
5724 m68k_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
5725 int incoming ATTRIBUTE_UNUSED)
5727 return gen_rtx_REG (Pmode, M68K_STRUCT_VALUE_REGNUM);
5730 /* Return nonzero if register old_reg can be renamed to register new_reg. */
5732 m68k_hard_regno_rename_ok (unsigned int old_reg ATTRIBUTE_UNUSED,
5733 unsigned int new_reg)
5736 /* Interrupt functions can only use registers that have already been
5737 saved by the prologue, even if they would normally be
5738 call-clobbered. */
5740 if ((m68k_get_function_kind (current_function_decl)
5741 == m68k_fk_interrupt_handler)
5742 && !df_regs_ever_live_p (new_reg))
5743 return 0;
/* Any other rename is acceptable.  */
5745 return 1;
5748 /* Implement TARGET_HARD_REGNO_NREGS.
5750 On the m68k, ordinary registers hold 32 bits worth;
5751 for the 68881 registers, a single register is always enough for
5752 anything that can be stored in them at all. */
5754 static unsigned int
5755 m68k_hard_regno_nregs (unsigned int regno, machine_mode mode)
5757 if (regno >= 16)
5758 return GET_MODE_NUNITS (mode);
5759 return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
5762 /* Implement TARGET_HARD_REGNO_MODE_OK. On the 68000, we let the cpu
5763 registers can hold any mode, but restrict the 68881 registers to
5764 floating-point modes. */
5766 static bool
5767 m68k_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
5769 if (DATA_REGNO_P (regno))
5771 /* Data Registers, can hold aggregate if fits in. */
5772 if (regno + GET_MODE_SIZE (mode) / 4 <= 8)
5773 return true;
5775 else if (ADDRESS_REGNO_P (regno))
5777 if (regno + GET_MODE_SIZE (mode) / 4 <= 16)
5778 return true;
5780 else if (FP_REGNO_P (regno))
5782 /* FPU registers, hold float or complex float of long double or
5783 smaller. */
5784 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
5785 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
5786 && GET_MODE_UNIT_SIZE (mode) <= TARGET_FP_REG_SIZE)
5787 return true;
5789 return false;
5792 /* Implement TARGET_MODES_TIEABLE_P. */
5794 static bool
5795 m68k_modes_tieable_p (machine_mode mode1, machine_mode mode2)
5797 return (!TARGET_HARD_FLOAT
5798 || ((GET_MODE_CLASS (mode1) == MODE_FLOAT
5799 || GET_MODE_CLASS (mode1) == MODE_COMPLEX_FLOAT)
5800 == (GET_MODE_CLASS (mode2) == MODE_FLOAT
5801 || GET_MODE_CLASS (mode2) == MODE_COMPLEX_FLOAT)));
5804 /* Implement SECONDARY_RELOAD_CLASS. */
5806 enum reg_class
5807 m68k_secondary_reload_class (enum reg_class rclass,
5808 machine_mode mode, rtx x)
5810 int regno;
5812 regno = true_regnum (x);
5814 /* If one operand of a movqi is an address register, the other
5815 operand must be a general register or constant. Other types
5816 of operand must be reloaded through a data register. */
5817 if (GET_MODE_SIZE (mode) == 1
5818 && reg_classes_intersect_p (rclass, ADDR_REGS)
5819 && !(INT_REGNO_P (regno) || CONSTANT_P (x)))
5820 return DATA_REGS;
5822 /* PC-relative addresses must be loaded into an address register first. */
5823 if (TARGET_PCREL
5824 && !reg_class_subset_p (rclass, ADDR_REGS)
5825 && symbolic_operand (x, VOIDmode))
5826 return ADDR_REGS;
5828 return NO_REGS;
5831 /* Implement PREFERRED_RELOAD_CLASS. */
5833 enum reg_class
5834 m68k_preferred_reload_class (rtx x, enum reg_class rclass)
5836 enum reg_class secondary_class;
5838 /* If RCLASS might need a secondary reload, try restricting it to
5839 a class that doesn't. */
5840 secondary_class = m68k_secondary_reload_class (rclass, GET_MODE (x), x);
5841 if (secondary_class != NO_REGS
5842 && reg_class_subset_p (secondary_class, rclass))
5843 return secondary_class;
5845 /* Prefer to use moveq for in-range constants. */
5846 if (GET_CODE (x) == CONST_INT
5847 && reg_class_subset_p (DATA_REGS, rclass)
5848 && IN_RANGE (INTVAL (x), -0x80, 0x7f))
5849 return DATA_REGS;
5851 /* ??? Do we really need this now? */
5852 if (GET_CODE (x) == CONST_DOUBLE
5853 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
5855 if (TARGET_HARD_FLOAT && reg_class_subset_p (FP_REGS, rclass))
5856 return FP_REGS;
5858 return NO_REGS;
5861 return rclass;
5864 /* Return floating point values in a 68881 register. This makes 68881 code
5865 a little bit faster. It also makes -msoft-float code incompatible with
5866 hard-float code, so people have to be careful not to mix the two.
5867 For ColdFire it was decided the ABI incompatibility is undesirable.
5868 If there is need for a hard-float ABI it is probably worth doing it
5869 properly and also passing function arguments in FP registers. */
5871 m68k_libcall_value (machine_mode mode)
5873 switch (mode) {
/* Hardware-float scalar modes come back in %fp0 when a 68881 is in use.  */
5874 case E_SFmode:
5875 case E_DFmode:
5876 case E_XFmode:
5877 if (TARGET_68881)
5878 return gen_rtx_REG (mode, FP0_REG);
5879 break;
5880 default:
5881 break;
/* Otherwise the value is in %d0, or %a0 when
   m68k_libcall_value_in_a0_p is set.  */
5884 return gen_rtx_REG (mode, m68k_libcall_value_in_a0_p ? A0_REG : D0_REG);
5887 /* Location in which function value is returned.
5888 NOTE: Due to differences in ABIs, don't call this function directly,
5889 use FUNCTION_VALUE instead. */
5891 m68k_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
5893 machine_mode mode;
5895 mode = TYPE_MODE (valtype);
/* Hardware-float scalar results come back in %fp0, mirroring
   m68k_libcall_value above.  */
5896 switch (mode) {
5897 case E_SFmode:
5898 case E_DFmode:
5899 case E_XFmode:
5900 if (TARGET_68881)
5901 return gen_rtx_REG (mode, FP0_REG);
5902 break;
5903 default:
5904 break;
5907 /* If the function returns a pointer, push that into %a0. */
5908 if (func && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (func))))
5909 /* For compatibility with the large body of existing code which
5910 does not always properly declare external functions returning
5911 pointer types, the m68k/SVR4 convention is to copy the value
5912 returned for pointer functions from a0 to d0 in the function
5913 epilogue, so that callers that have neglected to properly
5914 declare the callee can still find the correct return value in
5915 d0. */
5916 return gen_rtx_PARALLEL
5917 (mode,
5918 gen_rtvec (2,
5919 gen_rtx_EXPR_LIST (VOIDmode,
5920 gen_rtx_REG (mode, A0_REG),
5921 const0_rtx),
5922 gen_rtx_EXPR_LIST (VOIDmode,
5923 gen_rtx_REG (mode, D0_REG),
5924 const0_rtx)));
/* A pointer-typed value without a known FUNC declaration goes in %a0
   alone; everything else in %d0.  */
5925 else if (POINTER_TYPE_P (valtype))
5926 return gen_rtx_REG (mode, A0_REG);
5927 else
5928 return gen_rtx_REG (mode, D0_REG);
5931 /* Worker function for TARGET_RETURN_IN_MEMORY. */
5932 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
5933 static bool
5934 m68k_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
5936 machine_mode mode = TYPE_MODE (type);
/* BLKmode values have no register representation at all.  */
5938 if (mode == BLKmode)
5939 return true;
5941 /* If TYPE's known alignment is less than the alignment of MODE that
5942 would contain the structure, then return in memory. We need to
5943 do so to maintain the compatibility between code compiled with
5944 -mstrict-align and that compiled with -mno-strict-align. */
5945 if (AGGREGATE_TYPE_P (type)
5946 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (mode))
5947 return true;
5949 return false;
5951 #endif
5953 /* CPU to schedule the program for. */
5954 enum attr_cpu m68k_sched_cpu;
5956 /* MAC to schedule the program for. */
5957 enum attr_mac m68k_sched_mac;
5959 /* Operand type. */
/* Classification of a single instruction operand used by the ColdFire
   scheduling hooks below (size estimation, memory-access attributes).  */
5960 enum attr_op_type
5962 /* No operand. */
5963 OP_TYPE_NONE,
5965 /* Integer register. */
5966 OP_TYPE_RN,
5968 /* FP register. */
5969 OP_TYPE_FPN,
5971 /* Implicit mem reference (e.g. stack). */
5972 OP_TYPE_MEM1,
5974 /* Memory without offset or indexing. EA modes 2, 3 and 4. */
5975 OP_TYPE_MEM234,
5977 /* Memory with offset but without indexing. EA mode 5. */
5978 OP_TYPE_MEM5,
5980 /* Memory with indexing. EA mode 6. */
5981 OP_TYPE_MEM6,
5983 /* Memory referenced by absolute address. EA mode 7. */
5984 OP_TYPE_MEM7,
5986 /* Immediate operand that doesn't require extension word. */
5987 OP_TYPE_IMM_Q,
5989 /* Immediate 16 bit operand. */
5990 OP_TYPE_IMM_W,
5992 /* Immediate 32 bit operand. */
5993 OP_TYPE_IMM_L
5996 /* Return type of memory ADDR_RTX refers to. */
5997 static enum attr_op_type
5998 sched_address_type (machine_mode mode, rtx addr_rtx)
6000 struct m68k_address address;
/* A bare symbol is an absolute (mode 7) reference.  */
6002 if (symbolic_operand (addr_rtx, VOIDmode))
6003 return OP_TYPE_MEM7;
6005 if (!m68k_decompose_address (mode, addr_rtx,
6006 reload_completed, &address))
6008 gcc_assert (!reload_completed);
6009 /* Reload will likely fix the address to be in the register. */
6010 return OP_TYPE_MEM234;
/* A scale implies an index register: EA mode 6.  */
6013 if (address.scale != 0)
6014 return OP_TYPE_MEM6;
6016 if (address.base != NULL_RTX)
6018 if (address.offset == NULL_RTX)
6019 return OP_TYPE_MEM234;
6021 return OP_TYPE_MEM5;
/* No base and no scale: only an absolute offset remains.  */
6024 gcc_assert (address.offset != NULL_RTX);
6026 return OP_TYPE_MEM7;
6029 /* Return X or Y (depending on OPX_P) operand of INSN. */
6030 static rtx
6031 sched_get_operand (rtx_insn *insn, bool opx_p)
6033 int i;
6035 if (recog_memoized (insn) < 0)
6036 gcc_unreachable ();
6038 extract_constrain_insn_cached (insn);
6040 if (opx_p)
6041 i = get_attr_opx (insn);
6042 else
6043 i = get_attr_opy (insn);
6045 if (i >= recog_data.n_operands)
6046 return NULL;
6048 return recog_data.operand[i];
6051 /* Return type of INSN's operand X (if OPX_P) or operand Y (if !OPX_P).
6052 If ADDRESS_P is true, return type of memory location operand refers to. */
6053 static enum attr_op_type
6054 sched_attr_op_type (rtx_insn *insn, bool opx_p, bool address_p)
6056 rtx op;
6058 op = sched_get_operand (insn, opx_p);
6060 if (op == NULL)
6062 gcc_assert (!reload_completed);
6063 return OP_TYPE_RN;
6066 if (address_p)
6067 return sched_address_type (QImode, op);
6069 if (memory_operand (op, VOIDmode))
6070 return sched_address_type (GET_MODE (op), XEXP (op, 0));
6072 if (register_operand (op, VOIDmode))
6074 if ((!reload_completed && FLOAT_MODE_P (GET_MODE (op)))
6075 || (reload_completed && FP_REG_P (op)))
6076 return OP_TYPE_FPN;
6078 return OP_TYPE_RN;
6081 if (GET_CODE (op) == CONST_INT)
/* NOTE(review): INTVAL is a HOST_WIDE_INT but is stored into an int
   here, truncating before the range checks below — confirm wide
   constants cannot reach this point post-reload.  */
6083 int ival;
6085 ival = INTVAL (op);
6087 /* Check for quick constants. */
6088 switch (get_attr_type (insn))
6090 case TYPE_ALUQ_L:
6091 if (IN_RANGE (ival, 1, 8) || IN_RANGE (ival, -8, -1))
6092 return OP_TYPE_IMM_Q;
6094 gcc_assert (!reload_completed);
6095 break;
6097 case TYPE_MOVEQ_L:
6098 if (USE_MOVQ (ival))
6099 return OP_TYPE_IMM_Q;
6101 gcc_assert (!reload_completed);
6102 break;
6104 case TYPE_MOV3Q_L:
6105 if (valid_mov3q_const (ival))
6106 return OP_TYPE_IMM_Q;
6108 gcc_assert (!reload_completed);
6109 break;
6111 default:
6112 break;
/* Not a quick constant: classify by how many extension words it needs.  */
6115 if (IN_RANGE (ival, -0x8000, 0x7fff))
6116 return OP_TYPE_IMM_W;
6118 return OP_TYPE_IMM_L;
6121 if (GET_CODE (op) == CONST_DOUBLE)
6123 switch (GET_MODE (op))
6125 case E_SFmode:
6126 return OP_TYPE_IMM_W;
6128 case E_VOIDmode:
6129 case E_DFmode:
6130 return OP_TYPE_IMM_L;
6132 default:
6133 gcc_unreachable ();
6137 if (GET_CODE (op) == CONST
6138 || symbolic_operand (op, VOIDmode)
6139 || LABEL_P (op))
6141 switch (GET_MODE (op))
6143 case E_QImode:
6144 return OP_TYPE_IMM_Q;
6146 case E_HImode:
6147 return OP_TYPE_IMM_W;
6149 case E_SImode:
6150 return OP_TYPE_IMM_L;
6152 default:
6153 if (symbolic_operand (m68k_unwrap_symbol (op, false), VOIDmode))
6154 /* Just a guess. */
6155 return OP_TYPE_IMM_W;
6157 return OP_TYPE_IMM_L;
/* Anything else should only occur pre-reload; classify by mode class.  */
6161 gcc_assert (!reload_completed);
6163 if (FLOAT_MODE_P (GET_MODE (op)))
6164 return OP_TYPE_FPN;
6166 return OP_TYPE_RN;
6169 /* Implement opx_type attribute.
6170 Return type of INSN's operand X.
6171 If ADDRESS_P is true, return type of memory location operand refers to. */
/* This is a 1:1 translation of the generic OP_TYPE_* classification into
   the generated OPX_TYPE_* attribute values.  */
6172 enum attr_opx_type
6173 m68k_sched_attr_opx_type (rtx_insn *insn, int address_p)
6175 switch (sched_attr_op_type (insn, true, address_p != 0))
6177 case OP_TYPE_RN:
6178 return OPX_TYPE_RN;
6180 case OP_TYPE_FPN:
6181 return OPX_TYPE_FPN;
6183 case OP_TYPE_MEM1:
6184 return OPX_TYPE_MEM1;
6186 case OP_TYPE_MEM234:
6187 return OPX_TYPE_MEM234;
6189 case OP_TYPE_MEM5:
6190 return OPX_TYPE_MEM5;
6192 case OP_TYPE_MEM6:
6193 return OPX_TYPE_MEM6;
6195 case OP_TYPE_MEM7:
6196 return OPX_TYPE_MEM7;
6198 case OP_TYPE_IMM_Q:
6199 return OPX_TYPE_IMM_Q;
6201 case OP_TYPE_IMM_W:
6202 return OPX_TYPE_IMM_W;
6204 case OP_TYPE_IMM_L:
6205 return OPX_TYPE_IMM_L;
6207 default:
6208 gcc_unreachable ();
6212 /* Implement opy_type attribute.
6213 Return type of INSN's operand Y.
6214 If ADDRESS_P is true, return type of memory location operand refers to. */
/* Mirror of m68k_sched_attr_opx_type above, mapping OP_TYPE_* to the
   generated OPY_TYPE_* attribute values.  */
6215 enum attr_opy_type
6216 m68k_sched_attr_opy_type (rtx_insn *insn, int address_p)
6218 switch (sched_attr_op_type (insn, false, address_p != 0))
6220 case OP_TYPE_RN:
6221 return OPY_TYPE_RN;
6223 case OP_TYPE_FPN:
6224 return OPY_TYPE_FPN;
6226 case OP_TYPE_MEM1:
6227 return OPY_TYPE_MEM1;
6229 case OP_TYPE_MEM234:
6230 return OPY_TYPE_MEM234;
6232 case OP_TYPE_MEM5:
6233 return OPY_TYPE_MEM5;
6235 case OP_TYPE_MEM6:
6236 return OPY_TYPE_MEM6;
6238 case OP_TYPE_MEM7:
6239 return OPY_TYPE_MEM7;
6241 case OP_TYPE_IMM_Q:
6242 return OPY_TYPE_IMM_Q;
6244 case OP_TYPE_IMM_W:
6245 return OPY_TYPE_IMM_W;
6247 case OP_TYPE_IMM_L:
6248 return OPY_TYPE_IMM_L;
6250 default:
6251 gcc_unreachable ();
6255 /* Return size of INSN as int. */
6256 static int
6257 sched_get_attr_size_int (rtx_insn *insn)
6259 int size;
6261 switch (get_attr_type (insn))
6263 case TYPE_IGNORE:
6264 /* There should be no references to m68k_sched_attr_size for 'ignore'
6265 instructions. */
6266 gcc_unreachable ();
6267 return 0;
6269 case TYPE_MUL_L:
6270 size = 2;
6271 break;
6273 default:
6274 size = 1;
6275 break;
6278 switch (get_attr_opx_type (insn))
6280 case OPX_TYPE_NONE:
6281 case OPX_TYPE_RN:
6282 case OPX_TYPE_FPN:
6283 case OPX_TYPE_MEM1:
6284 case OPX_TYPE_MEM234:
6285 case OPY_TYPE_IMM_Q:
6286 break;
6288 case OPX_TYPE_MEM5:
6289 case OPX_TYPE_MEM6:
6290 /* Here we assume that most absolute references are short. */
6291 case OPX_TYPE_MEM7:
6292 case OPY_TYPE_IMM_W:
6293 ++size;
6294 break;
6296 case OPY_TYPE_IMM_L:
6297 size += 2;
6298 break;
6300 default:
6301 gcc_unreachable ();
6304 switch (get_attr_opy_type (insn))
6306 case OPY_TYPE_NONE:
6307 case OPY_TYPE_RN:
6308 case OPY_TYPE_FPN:
6309 case OPY_TYPE_MEM1:
6310 case OPY_TYPE_MEM234:
6311 case OPY_TYPE_IMM_Q:
6312 break;
6314 case OPY_TYPE_MEM5:
6315 case OPY_TYPE_MEM6:
6316 /* Here we assume that most absolute references are short. */
6317 case OPY_TYPE_MEM7:
6318 case OPY_TYPE_IMM_W:
6319 ++size;
6320 break;
6322 case OPY_TYPE_IMM_L:
6323 size += 2;
6324 break;
6326 default:
6327 gcc_unreachable ();
6330 if (size > 3)
6332 gcc_assert (!reload_completed);
6334 size = 3;
6337 return size;
6340 /* Return size of INSN as attribute enum value. */
6341 enum attr_size
6342 m68k_sched_attr_size (rtx_insn *insn)
6344 switch (sched_get_attr_size_int (insn))
6346 case 1:
6347 return SIZE_1;
6349 case 2:
6350 return SIZE_2;
6352 case 3:
6353 return SIZE_3;
6355 default:
6356 gcc_unreachable ();
/* Classify the X or Y (depending on OPX_P) operand of INSN for the
   op_mem attribute: OP_TYPE_RN for non-memory operands, OP_TYPE_MEM6
   for indexed memory, and OP_TYPE_MEM1 for any other memory access.  */
6362 static enum attr_op_type
6363 sched_get_opxy_mem_type (rtx_insn *insn, bool opx_p)
6365 if (opx_p)
6367 switch (get_attr_opx_type (insn))
6369 case OPX_TYPE_NONE:
6370 case OPX_TYPE_RN:
6371 case OPX_TYPE_FPN:
6372 case OPX_TYPE_IMM_Q:
6373 case OPX_TYPE_IMM_W:
6374 case OPX_TYPE_IMM_L:
6375 return OP_TYPE_RN;
6377 case OPX_TYPE_MEM1:
6378 case OPX_TYPE_MEM234:
6379 case OPX_TYPE_MEM5:
6380 case OPX_TYPE_MEM7:
6381 return OP_TYPE_MEM1;
6383 case OPX_TYPE_MEM6:
6384 return OP_TYPE_MEM6;
6386 default:
6387 gcc_unreachable ();
6390 else
6392 switch (get_attr_opy_type (insn))
6394 case OPY_TYPE_NONE:
6395 case OPY_TYPE_RN:
6396 case OPY_TYPE_FPN:
6397 case OPY_TYPE_IMM_Q:
6398 case OPY_TYPE_IMM_W:
6399 case OPY_TYPE_IMM_L:
6400 return OP_TYPE_RN;
6402 case OPY_TYPE_MEM1:
6403 case OPY_TYPE_MEM234:
6404 case OPY_TYPE_MEM5:
6405 case OPY_TYPE_MEM7:
6406 return OP_TYPE_MEM1;
6408 case OPY_TYPE_MEM6:
6409 return OP_TYPE_MEM6;
6411 default:
6412 gcc_unreachable ();
6417 /* Implement op_mem attribute. */
/* The two positions of OP_MEM_xy encode memory reads and memory writes
   respectively: 0 = no access, 1 = plain memory, I = indexed memory.  */
6418 enum attr_op_mem
6419 m68k_sched_attr_op_mem (rtx_insn *insn)
6421 enum attr_op_type opx;
6422 enum attr_op_type opy;
6424 opx = sched_get_opxy_mem_type (insn, true);
6425 opy = sched_get_opxy_mem_type (insn, false);
6427 if (opy == OP_TYPE_RN && opx == OP_TYPE_RN)
6428 return OP_MEM_00;
6430 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM1)
6432 switch (get_attr_opx_access (insn))
6434 case OPX_ACCESS_R:
6435 return OP_MEM_10;
6437 case OPX_ACCESS_W:
6438 return OP_MEM_01;
6440 case OPX_ACCESS_RW:
6441 return OP_MEM_11;
6443 default:
6444 gcc_unreachable ();
6448 if (opy == OP_TYPE_RN && opx == OP_TYPE_MEM6)
6450 switch (get_attr_opx_access (insn))
6452 case OPX_ACCESS_R:
6453 return OP_MEM_I0;
6455 case OPX_ACCESS_W:
6456 return OP_MEM_0I;
6458 case OPX_ACCESS_RW:
6459 return OP_MEM_I1;
6461 default:
6462 gcc_unreachable ();
/* Operand Y memory accesses are always reads.  */
6466 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_RN)
6467 return OP_MEM_10;
6469 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM1)
6471 switch (get_attr_opx_access (insn))
6473 case OPX_ACCESS_W:
6474 return OP_MEM_11;
6476 default:
6477 gcc_assert (!reload_completed);
6478 return OP_MEM_11;
6482 if (opy == OP_TYPE_MEM1 && opx == OP_TYPE_MEM6)
6484 switch (get_attr_opx_access (insn))
6486 case OPX_ACCESS_W:
6487 return OP_MEM_1I;
6489 default:
6490 gcc_assert (!reload_completed);
6491 return OP_MEM_1I;
6495 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_RN)
6496 return OP_MEM_I0;
6498 if (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM1)
6500 switch (get_attr_opx_access (insn))
6502 case OPX_ACCESS_W:
6503 return OP_MEM_I1;
6505 default:
6506 gcc_assert (!reload_completed);
6507 return OP_MEM_I1;
6511 gcc_assert (opy == OP_TYPE_MEM6 && opx == OP_TYPE_MEM6);
6512 gcc_assert (!reload_completed);
6513 return OP_MEM_I1;
6516 /* Data for ColdFire V4 index bypass.
6517 Producer modifies register that is used as index in consumer with
6518 specified scale. */
/* Filled in by the bypass guard and consumed (then cleared) by
   m68k_sched_adjust_cost below.  */
6519 static struct
6521 /* Producer instruction. */
6522 rtx pro;
6524 /* Consumer instruction. */
6525 rtx con;
6527 /* Scale of indexed memory access within consumer.
6528 Or zero if bypass should not be effective at the moment. */
6529 int scale;
6530 } sched_cfv4_bypass_data;
6532 /* An empty state that is used in m68k_sched_adjust_cost. */
6533 static state_t sched_adjust_cost_state;
6535 /* Implement adjust_cost scheduler hook.
6536 Return adjusted COST of dependency LINK between DEF_INSN and INSN. */
6537 static int
6538 m68k_sched_adjust_cost (rtx_insn *insn, int, rtx_insn *def_insn, int cost,
6539 unsigned int)
6541 int delay;
/* Only adjust costs between recognized instructions.  */
6543 if (recog_memoized (def_insn) < 0
6544 || recog_memoized (insn) < 0)
6545 return cost;
6547 if (sched_cfv4_bypass_data.scale == 1)
6548 /* Handle ColdFire V4 bypass for indexed address with 1x scale. */
6550 /* haifa-sched.cc: insn_cost () calls bypass_p () just before
6551 targetm.sched.adjust_cost (). Hence, we can be relatively sure
6552 that the data in sched_cfv4_bypass_data is up to date. */
6553 gcc_assert (sched_cfv4_bypass_data.pro == def_insn
6554 && sched_cfv4_bypass_data.con == insn);
6556 if (cost < 3)
6557 cost = 3;
/* Consume the bypass record so it cannot leak into the next query.  */
6559 sched_cfv4_bypass_data.pro = NULL;
6560 sched_cfv4_bypass_data.con = NULL;
6561 sched_cfv4_bypass_data.scale = 0;
6563 else
6564 gcc_assert (sched_cfv4_bypass_data.pro == NULL
6565 && sched_cfv4_bypass_data.con == NULL
6566 && sched_cfv4_bypass_data.scale == 0);
6568 /* Don't try to issue INSN earlier than DFA permits.
6569 This is especially useful for instructions that write to memory,
6570 as their true dependence (default) latency is better to be set to 0
6571 to workaround alias analysis limitations.
6572 This is, in fact, a machine independent tweak, so, probably,
6573 it should be moved to haifa-sched.cc: insn_cost (). */
6574 delay = min_insn_conflict_delay (sched_adjust_cost_state, def_insn, insn);
6575 if (delay > cost)
6576 cost = delay;
6578 return cost;
6581 /* Return maximal number of insns that can be scheduled on a single cycle. */
6582 static int
6583 m68k_sched_issue_rate (void)
6585 switch (m68k_sched_cpu)
6587 case CPU_CFV1:
6588 case CPU_CFV2:
6589 case CPU_CFV3:
6590 return 1;
6592 case CPU_CFV4:
6593 return 2;
6595 default:
6596 gcc_unreachable ();
6597 return 0;
6601 /* Maximal length of instruction for current CPU.
6602 E.g. it is 3 for any ColdFire core. */
6603 static int max_insn_size;
6605 /* Data to model instruction buffer of CPU. */
6606 struct _sched_ib
6608 /* True if instruction buffer model is modeled for current CPU. */
6609 bool enabled_p;
6611 /* Size of the instruction buffer in words. */
6612 int size;
6614 /* Number of filled words in the instruction buffer. */
6615 int filled;
6617 /* Additional information about instruction buffer for CPUs that have
   a buffer of instruction records, rather than a plain buffer
6619 of instruction words. */
6620 struct _sched_ib_records
6622 /* Size of buffer in records. */
6623 int n_insns;
/* Ring buffer of per-insn size adjustments (see
   m68k_sched_variable_issue).  */
6625 /* Array to hold data on adjustments made to the size of the buffer. */
6626 int *adjust;
6628 /* Index of the above array. */
6629 int adjust_index;
6630 } records;
6632 /* An insn that reserves (marks empty) one word in the instruction buffer. */
6633 rtx insn;
6636 static struct _sched_ib sched_ib;
6638 /* ID of memory unit. */
6639 static int sched_mem_unit_code;
6641 /* Implementation of the targetm.sched.variable_issue () hook.
6642 It is called after INSN was issued. It returns the number of insns
6643 that can possibly get scheduled on the current cycle.
6644 It is used here to determine the effect of INSN on the instruction
6645 buffer. */
6646 static int
6647 m68k_sched_variable_issue (FILE *sched_dump ATTRIBUTE_UNUSED,
6648 int sched_verbose ATTRIBUTE_UNUSED,
6649 rtx_insn *insn, int can_issue_more)
6651 int insn_size;
6653 if (recog_memoized (insn) >= 0 && get_attr_type (insn) != TYPE_IGNORE)
6655 switch (m68k_sched_cpu)
6657 case CPU_CFV1:
6658 case CPU_CFV2:
6659 insn_size = sched_get_attr_size_int (insn);
6660 break;
6662 case CPU_CFV3:
6663 insn_size = sched_get_attr_size_int (insn);
6665 /* ColdFire V3 and V4 cores have instruction buffers that can
6666 accumulate up to 8 instructions regardless of instructions'
6667 sizes. So we should take care not to "prefetch" 24 one-word
6668 or 12 two-words instructions.
6669 To model this behavior we temporarily decrease size of the
6670 buffer by (max_insn_size - insn_size) for next 7 instructions. */
6672 int adjust;
6674 adjust = max_insn_size - insn_size;
6675 sched_ib.size -= adjust;
6677 if (sched_ib.filled > sched_ib.size)
6678 sched_ib.filled = sched_ib.size;
/* Record this adjustment in the ring buffer so it can be undone
   n_insns issues later.  */
6680 sched_ib.records.adjust[sched_ib.records.adjust_index] = adjust;
6683 ++sched_ib.records.adjust_index;
6684 if (sched_ib.records.adjust_index == sched_ib.records.n_insns)
6685 sched_ib.records.adjust_index = 0;
6687 /* Undo adjustment we did 7 instructions ago. */
6688 sched_ib.size
6689 += sched_ib.records.adjust[sched_ib.records.adjust_index];
6691 break;
6693 case CPU_CFV4:
6694 gcc_assert (!sched_ib.enabled_p);
6695 insn_size = 0;
6696 break;
6698 default:
6699 gcc_unreachable ();
6702 if (insn_size > sched_ib.filled)
6703 /* Scheduling for register pressure does not always take DFA into
6704 account. Workaround instruction buffer not being filled enough. */
6706 gcc_assert (sched_pressure == SCHED_PRESSURE_WEIGHTED);
6707 insn_size = sched_ib.filled;
6710 --can_issue_more;
/* Assume an inline asm drains the whole buffer.  */
6712 else if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6713 || asm_noperands (PATTERN (insn)) >= 0)
6714 insn_size = sched_ib.filled;
6715 else
6716 insn_size = 0;
6718 sched_ib.filled -= insn_size;
6720 return can_issue_more;
6723 /* Return how many instructions should scheduler lookahead to choose the
6724 best one. */
6725 static int
6726 m68k_sched_first_cycle_multipass_dfa_lookahead (void)
6728 return m68k_sched_issue_rate () - 1;
6731 /* Implementation of targetm.sched.init_global () hook.
6732 It is invoked once per scheduling pass and is used here
6733 to initialize scheduler constants. */
6734 static void
6735 m68k_sched_md_init_global (FILE *sched_dump ATTRIBUTE_UNUSED,
6736 int sched_verbose ATTRIBUTE_UNUSED,
6737 int n_insns ATTRIBUTE_UNUSED)
6739 /* Check that all instructions have DFA reservations and
6740 that all instructions can be issued from a clean state. */
6741 if (flag_checking)
6743 rtx_insn *insn;
6744 state_t state;
6746 state = alloca (state_size ());
6748 for (insn = get_insns (); insn != NULL; insn = NEXT_INSN (insn))
6750 if (INSN_P (insn) && recog_memoized (insn) >= 0)
6752 gcc_assert (insn_has_dfa_reservation_p (insn));
6754 state_reset (state);
6755 if (state_transition (state, insn) >= 0)
6756 gcc_unreachable ();
6761 /* Setup target cpu. */
6763 /* ColdFire V4 has a set of features to keep its instruction buffer full
6764 (e.g., a separate memory bus for instructions) and, hence, we do not model
6765 buffer for this CPU. */
6766 sched_ib.enabled_p = (m68k_sched_cpu != CPU_CFV4);
6768 switch (m68k_sched_cpu)
6770 case CPU_CFV4:
6771 sched_ib.filled = 0;
6773 /* FALLTHRU */
6775 case CPU_CFV1:
6776 case CPU_CFV2:
6777 max_insn_size = 3;
6778 sched_ib.records.n_insns = 0;
6779 sched_ib.records.adjust = NULL;
6780 break;
6782 case CPU_CFV3:
6783 max_insn_size = 3;
6784 sched_ib.records.n_insns = 8;
6785 sched_ib.records.adjust = XNEWVEC (int, sched_ib.records.n_insns);
6786 break;
6788 default:
6789 gcc_unreachable ();
6792 sched_mem_unit_code = get_cpu_unit_code ("cf_mem1");
6794 sched_adjust_cost_state = xmalloc (state_size ());
6795 state_reset (sched_adjust_cost_state);
/* Pre-build the dummy "ib" insn used to advance the buffer model in
   m68k_sched_dfa_post_advance_cycle.  */
6797 start_sequence ();
6798 emit_insn (gen_ib ());
6799 sched_ib.insn = get_insns ();
6800 end_sequence ();
6803 /* Scheduling pass is now finished. Free/reset static variables. */
6804 static void
6805 m68k_sched_md_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6806 int verbose ATTRIBUTE_UNUSED)
6808 sched_ib.insn = NULL;
6810 free (sched_adjust_cost_state);
6811 sched_adjust_cost_state = NULL;
6813 sched_mem_unit_code = 0;
6815 free (sched_ib.records.adjust);
6816 sched_ib.records.adjust = NULL;
6817 sched_ib.records.n_insns = 0;
6818 max_insn_size = 0;
6821 /* Implementation of targetm.sched.init () hook.
6822 It is invoked each time scheduler starts on the new block (basic block or
6823 extended basic block). */
6824 static void
6825 m68k_sched_md_init (FILE *sched_dump ATTRIBUTE_UNUSED,
6826 int sched_verbose ATTRIBUTE_UNUSED,
6827 int n_insns ATTRIBUTE_UNUSED)
/* Reset the per-block instruction buffer model.  */
6829 switch (m68k_sched_cpu)
6831 case CPU_CFV1:
6832 case CPU_CFV2:
6833 sched_ib.size = 6;
6834 break;
6836 case CPU_CFV3:
6837 sched_ib.size = sched_ib.records.n_insns * max_insn_size;
/* Clear any buffer-size adjustments left over from the previous block.  */
6839 memset (sched_ib.records.adjust, 0,
6840 sched_ib.records.n_insns * sizeof (*sched_ib.records.adjust));
6841 sched_ib.records.adjust_index = 0;
6842 break;
6844 case CPU_CFV4:
6845 gcc_assert (!sched_ib.enabled_p);
6846 sched_ib.size = 0;
6847 break;
6849 default:
6850 gcc_unreachable ();
6853 if (sched_ib.enabled_p)
6854 /* haifa-sched.cc: schedule_block () calls advance_cycle () just before
6855 the first cycle. Workaround that. */
6856 sched_ib.filled = -2;
6859 /* Implementation of targetm.sched.dfa_pre_advance_cycle () hook.
6860 It is invoked just before current cycle finishes and is used here
6861 to track if instruction buffer got its two words this cycle. */
6862 static void
6863 m68k_sched_dfa_pre_advance_cycle (void)
6865 if (!sched_ib.enabled_p)
6866 return;
6868 if (!cpu_unit_reservation_p (curr_state, sched_mem_unit_code))
6870 sched_ib.filled += 2;
6872 if (sched_ib.filled > sched_ib.size)
6873 sched_ib.filled = sched_ib.size;
6877 /* Implementation of targetm.sched.dfa_post_advance_cycle () hook.
6878 It is invoked just after new cycle begins and is used here
6879 to setup number of filled words in the instruction buffer so that
6880 instructions which won't have all their words prefetched would be
6881 stalled for a cycle. */
6882 static void
6883 m68k_sched_dfa_post_advance_cycle (void)
6885 int i;
6887 if (!sched_ib.enabled_p)
6888 return;
6890 /* Setup number of prefetched instruction words in the instruction
6891 buffer. */
6892 i = max_insn_size - sched_ib.filled;
/* NOTE(review): each iteration feeds the dummy "ib" insn through the
   DFA; the >= 0 test gates whether a word is counted as filled —
   confirm against the generated state_transition return convention.  */
6894 while (--i >= 0)
6896 if (state_transition (curr_state, sched_ib.insn) >= 0)
6897 /* Pick up scheduler state. */
6898 ++sched_ib.filled;
6902 /* Return X or Y (depending on OPX_P) operand of INSN,
6903 if it is an integer register, or NULL overwise. */
6904 static rtx
6905 sched_get_reg_operand (rtx_insn *insn, bool opx_p)
6907 rtx op = NULL;
6909 if (opx_p)
6911 if (get_attr_opx_type (insn) == OPX_TYPE_RN)
6913 op = sched_get_operand (insn, true);
6914 gcc_assert (op != NULL);
6916 if (!reload_completed && !REG_P (op))
6917 return NULL;
6920 else
6922 if (get_attr_opy_type (insn) == OPY_TYPE_RN)
6924 op = sched_get_operand (insn, false);
6925 gcc_assert (op != NULL);
6927 if (!reload_completed && !REG_P (op))
6928 return NULL;
6932 return op;
6935 /* Return true, if X or Y (depending on OPX_P) operand of INSN
6936 is a MEM. */
6937 static bool
6938 sched_mem_operand_p (rtx_insn *insn, bool opx_p)
6940 switch (sched_get_opxy_mem_type (insn, opx_p))
6942 case OP_TYPE_MEM1:
6943 case OP_TYPE_MEM6:
6944 return true;
6946 default:
6947 return false;
6951 /* Return X or Y (depending on OPX_P) operand of INSN,
6952 if it is a MEM, or NULL overwise. */
6953 static rtx
6954 sched_get_mem_operand (rtx_insn *insn, bool must_read_p, bool must_write_p)
6956 bool opx_p;
6957 bool opy_p;
6959 opx_p = false;
6960 opy_p = false;
6962 if (must_read_p)
6964 opx_p = true;
6965 opy_p = true;
6968 if (must_write_p)
6970 opx_p = true;
6971 opy_p = false;
6974 if (opy_p && sched_mem_operand_p (insn, false))
6975 return sched_get_operand (insn, false);
6977 if (opx_p && sched_mem_operand_p (insn, true))
6978 return sched_get_operand (insn, true);
6980 gcc_unreachable ();
6981 return NULL;
6984 /* Return non-zero if PRO modifies register used as part of
6985 address in CON. */
6987 m68k_sched_address_bypass_p (rtx_insn *pro, rtx_insn *con)
6989 rtx pro_x;
6990 rtx con_mem_read;
6992 pro_x = sched_get_reg_operand (pro, true);
6993 if (pro_x == NULL)
6994 return 0;
6996 con_mem_read = sched_get_mem_operand (con, true, false);
6997 gcc_assert (con_mem_read != NULL);
6999 if (reg_mentioned_p (pro_x, con_mem_read))
7000 return 1;
7002 return 0;
7005 /* Helper function for m68k_sched_indexed_address_bypass_p.
7006 if PRO modifies register used as index in CON,
7007 return scale of indexed memory access in CON. Return zero overwise. */
7008 static int
7009 sched_get_indexed_address_scale (rtx_insn *pro, rtx_insn *con)
7011 rtx reg;
7012 rtx mem;
7013 struct m68k_address address;
7015 reg = sched_get_reg_operand (pro, true);
7016 if (reg == NULL)
7017 return 0;
7019 mem = sched_get_mem_operand (con, true, false);
7020 gcc_assert (mem != NULL && MEM_P (mem));
7022 if (!m68k_decompose_address (GET_MODE (mem), XEXP (mem, 0), reload_completed,
7023 &address))
7024 gcc_unreachable ();
7026 if (REGNO (reg) == REGNO (address.index))
7028 gcc_assert (address.scale != 0);
7029 return address.scale;
7032 return 0;
7035 /* Return non-zero if PRO modifies register used
7036 as index with scale 2 or 4 in CON. */
7038 m68k_sched_indexed_address_bypass_p (rtx_insn *pro, rtx_insn *con)
7040 gcc_assert (sched_cfv4_bypass_data.pro == NULL
7041 && sched_cfv4_bypass_data.con == NULL
7042 && sched_cfv4_bypass_data.scale == 0);
7044 switch (sched_get_indexed_address_scale (pro, con))
7046 case 1:
7047 /* We can't have a variable latency bypass, so
7048 remember to adjust the insn cost in adjust_cost hook. */
7049 sched_cfv4_bypass_data.pro = pro;
7050 sched_cfv4_bypass_data.con = con;
7051 sched_cfv4_bypass_data.scale = 1;
7052 return 0;
7054 case 2:
7055 case 4:
7056 return 1;
7058 default:
7059 return 0;
7063 /* We generate a two-instructions program at M_TRAMP :
7064 movea.l &CHAIN_VALUE,%a0
7065 jmp FNADDR
7066 where %a0 can be modified by changing STATIC_CHAIN_REGNUM. */
7068 static void
7069 m68k_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
7071 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
7072 rtx mem;
7074 gcc_assert (ADDRESS_REGNO_P (STATIC_CHAIN_REGNUM));
7076 mem = adjust_address (m_tramp, HImode, 0);
7077 emit_move_insn (mem, GEN_INT(0x207C + ((STATIC_CHAIN_REGNUM-8) << 9)));
7078 mem = adjust_address (m_tramp, SImode, 2);
7079 emit_move_insn (mem, chain_value);
7081 mem = adjust_address (m_tramp, HImode, 6);
7082 emit_move_insn (mem, GEN_INT(0x4EF9));
7083 mem = adjust_address (m_tramp, SImode, 8);
7084 emit_move_insn (mem, fnaddr);
7086 FINALIZE_TRAMPOLINE (XEXP (m_tramp, 0));
7089 /* On the 68000, the RTS insn cannot pop anything.
7090 On the 68010, the RTD insn may be used to pop them if the number
7091 of args is fixed, but if the number is variable then the caller
7092 must pop them all. RTD can't be used for library calls now
7093 because the library is compiled with the Unix compiler.
7094 Use of RTD is a selectable option, since it is incompatible with
7095 standard Unix calling sequences. If the option is not selected,
7096 the caller must always pop the args. */
7098 static poly_int64
7099 m68k_return_pops_args (tree fundecl, tree funtype, poly_int64 size)
7101 return ((TARGET_RTD
7102 && (!fundecl
7103 || TREE_CODE (fundecl) != IDENTIFIER_NODE)
7104 && (!stdarg_p (funtype)))
7105 ? (HOST_WIDE_INT) size : 0);
7108 /* Make sure everything's fine if we *don't* have a given processor.
7109 This assumes that putting a register in fixed_regs will keep the
7110 compiler's mitts completely off it. We don't bother to zero it out
7111 of register classes. */
7113 static void
7114 m68k_conditional_register_usage (void)
7116 int i;
7117 HARD_REG_SET x;
7118 if (!TARGET_HARD_FLOAT)
7120 x = reg_class_contents[FP_REGS];
7121 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
7122 if (TEST_HARD_REG_BIT (x, i))
7123 fixed_regs[i] = call_used_regs[i] = 1;
7125 if (flag_pic)
7126 fixed_regs[PIC_REG] = call_used_regs[PIC_REG] = 1;
7129 static void
7130 m68k_init_sync_libfuncs (void)
7132 init_sync_libfuncs (UNITS_PER_WORD);
7135 /* Implements EPILOGUE_USES. All registers are live on exit from an
7136 interrupt routine. */
7137 bool
7138 m68k_epilogue_uses (int regno ATTRIBUTE_UNUSED)
7140 return (reload_completed
7141 && (m68k_get_function_kind (current_function_decl)
7142 == m68k_fk_interrupt_handler));
7146 /* Implement TARGET_C_EXCESS_PRECISION.
7148 Set the value of FLT_EVAL_METHOD in float.h. When using 68040 fp
7149 instructions, we get proper intermediate rounding, otherwise we
7150 get extended precision results. */
7152 static enum flt_eval_method
7153 m68k_excess_precision (enum excess_precision_type type)
7155 switch (type)
7157 case EXCESS_PRECISION_TYPE_FAST:
7158 /* The fastest type to promote to will always be the native type,
7159 whether that occurs with implicit excess precision or
7160 otherwise. */
7161 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
7162 case EXCESS_PRECISION_TYPE_STANDARD:
7163 case EXCESS_PRECISION_TYPE_IMPLICIT:
7164 /* Otherwise, the excess precision we want when we are
7165 in a standards compliant mode, and the implicit precision we
7166 provide can be identical. */
7167 if (TARGET_68040 || ! TARGET_68881)
7168 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
7170 return FLT_EVAL_METHOD_PROMOTE_TO_LONG_DOUBLE;
7171 case EXCESS_PRECISION_TYPE_FLOAT16:
7172 error ("%<-fexcess-precision=16%> is not supported on this target");
7173 break;
7174 default:
7175 gcc_unreachable ();
7177 return FLT_EVAL_METHOD_UNPREDICTABLE;
7180 /* Implement PUSH_ROUNDING. On the 680x0, sp@- in a byte insn really pushes
7181 a word. On the ColdFire, sp@- in a byte insn pushes just a byte. */
7183 poly_int64
7184 m68k_push_rounding (poly_int64 bytes)
7186 if (TARGET_COLDFIRE)
7187 return bytes;
7188 return (bytes + 1) & ~1;
7191 /* Implement TARGET_PROMOTE_FUNCTION_MODE. */
7193 static machine_mode
7194 m68k_promote_function_mode (const_tree type, machine_mode mode,
7195 int *punsignedp ATTRIBUTE_UNUSED,
7196 const_tree fntype ATTRIBUTE_UNUSED,
7197 int for_return)
7199 /* Promote libcall arguments narrower than int to match the normal C
7200 ABI (for which promotions are handled via
7201 TARGET_PROMOTE_PROTOTYPES). */
7202 if (type == NULL_TREE && !for_return && (mode == QImode || mode == HImode))
7203 return SImode;
7204 return mode;
7207 /* Implement TARGET_ZERO_CALL_USED_REGS. */
7209 static HARD_REG_SET
7210 m68k_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
7212 rtx zero_fpreg = NULL_RTX;
7214 for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
7215 if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
7217 rtx reg, zero;
7219 if (INT_REGNO_P (regno))
7221 reg = regno_reg_rtx[regno];
7222 zero = CONST0_RTX (SImode);
7224 else if (FP_REGNO_P (regno))
7226 reg = gen_raw_REG (SFmode, regno);
7227 if (zero_fpreg == NULL_RTX)
7229 /* On the 040/060 clearing an FP reg loads a large
7230 immediate. To reduce code size use the first
7231 cleared FP reg to clear remaining ones. Don't do
7232 this on cores which use fmovecr. */
7233 zero = CONST0_RTX (SFmode);
7234 if (TUNE_68040_60)
7235 zero_fpreg = reg;
7237 else
7238 zero = zero_fpreg;
7240 else
7241 gcc_unreachable ();
7243 emit_move_insn (reg, zero);
7246 return need_zeroed_hardregs;
7249 /* Implement TARGET_C_MODE_FOR_FLOATING_TYPE. Return XFmode or DFmode
7250 for TI_LONG_DOUBLE_TYPE which is for long double type, go with the
7251 default one for the others. */
7253 static machine_mode
7254 m68k_c_mode_for_floating_type (enum tree_index ti)
7256 if (ti == TI_LONG_DOUBLE_TYPE)
7257 return LONG_DOUBLE_TYPE_MODE;
7258 return default_mode_for_floating_type (ti);
7261 /* Implement TARGET_LRA_P. */
7263 static bool
7264 m68k_use_lra_p ()
7266 return m68k_lra_p;
7269 #include "gt-m68k.h"